| column | dtype / range |
| --- | --- |
| hexsha | stringlengths 40–40 |
| size | int64 4–1.02M |
| ext | stringclasses 8 values |
| lang | stringclasses 1 value |
| max_stars_repo_path | stringlengths 4–209 |
| max_stars_repo_name | stringlengths 5–121 |
| max_stars_repo_head_hexsha | stringlengths 40–40 |
| max_stars_repo_licenses | listlengths 1–10 |
| max_stars_count | int64 1–191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths 24–24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_path | stringlengths 4–209 |
| max_issues_repo_name | stringlengths 5–121 |
| max_issues_repo_head_hexsha | stringlengths 40–40 |
| max_issues_repo_licenses | listlengths 1–10 |
| max_issues_count | int64 1–67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths 24–24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_path | stringlengths 4–209 |
| max_forks_repo_name | stringlengths 5–121 |
| max_forks_repo_head_hexsha | stringlengths 40–40 |
| max_forks_repo_licenses | listlengths 1–10 |
| max_forks_count | int64 1–105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths 24–24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths 24–24 ⌀ |
| content | stringlengths 4–1.02M |
| avg_line_length | float64 1.07–66.1k |
| max_line_length | int64 4–266k |
| alphanum_fraction | float64 0.01–1 |
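The rows that follow use this schema: one source file per row in `content`, surrounded by repository metadata. As a quick orientation, here is a minimal sketch of streaming a dataset with these columns via the Hugging Face `datasets` library; the dataset name and `data_dir` are assumptions, not something this dump states.

```python
from itertools import islice
from datasets import load_dataset

# Assumed source: a Stack-style code corpus exposing the columns listed above.
ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)

for row in islice(ds, 3):
    # `content` holds the full source file; the other columns are metadata.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], row["alphanum_fraction"])
```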
hexsha: 7a19d360fa1e44512450b65f0c0c154f5c643ddb | size: 8,495 | ext: py | lang: Python
max_stars_repo_path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/aio/operations/_load_balancer_probes_operations.py | max_stars_repo_name: rsdoherty/azure-sdk-for-python | max_stars_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2020-06-23T02:25:27.000Z | max_stars_repo_stars_event_max_datetime: 2021-09-07T18:48:11.000Z
max_issues_repo_path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/aio/operations/_load_balancer_probes_operations.py | max_issues_repo_name: rsdoherty/azure-sdk-for-python | max_issues_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 510 | max_issues_repo_issues_event_min_datetime: 2019-07-17T16:11:19.000Z | max_issues_repo_issues_event_max_datetime: 2021-08-02T08:38:32.000Z
max_forks_repo_path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_11_01/aio/operations/_load_balancer_probes_operations.py | max_forks_repo_name: rsdoherty/azure-sdk-for-python | max_forks_repo_head_hexsha: 6bba5326677468e6660845a703686327178bb7b1 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2019-09-04T12:51:37.000Z | max_forks_repo_forks_event_max_datetime: 2020-09-16T07:28:40.000Z

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerProbesOperations:
"""LoadBalancerProbesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["_models.LoadBalancerProbeListResult"]:
"""Gets all the load balancer probes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_11_01.models.LoadBalancerProbeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerProbeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
probe_name: str,
**kwargs
) -> "_models.Probe":
"""Gets load balancer probe.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param probe_name: The name of the probe.
:type probe_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Probe, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.Probe
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Probe"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Probe', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'} # type: ignore
| 47.458101 | 192 | 0.665097 |
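The generated `LoadBalancerProbesOperations` class above is normally reached through the service client rather than constructed directly. A hedged sketch of listing probes with the async client follows; the subscription, resource group and load balancer names are placeholders, and the top-level client may target a newer API version than 2017-11-01.

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.network.aio import NetworkManagementClient


async def list_probes():
    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            # `load_balancer_probes` is the operations group implemented above.
            async for probe in client.load_balancer_probes.list("my-rg", "my-lb"):
                print(probe.name)


asyncio.run(list_probes())
```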
hexsha: ca6e181ba06535a5b98e3635481a8168280357db | size: 3,481 | ext: py | lang: Python
max_stars_repo_path: op/usematplot/intro1.py | max_stars_repo_name: RustFisher/python-playground | max_stars_repo_head_hexsha: 3851eedd3db58d0e7de149da35f44356c7caa3f6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-07-22T01:58:16.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-22T06:24:13.000Z
max_issues_repo_path: op/usematplot/intro1.py | max_issues_repo_name: RustFisher/python-playground | max_issues_repo_head_hexsha: 3851eedd3db58d0e7de149da35f44356c7caa3f6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: op/usematplot/intro1.py | max_forks_repo_name: RustFisher/python-playground | max_forks_repo_head_hexsha: 3851eedd3db58d0e7de149da35f44356c7caa3f6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

import math
import matplotlib.pyplot as plt
def demo1():
x_list = []
y_list = []
for i in range(0, 100):
x_list.append(i)
y_list.append(math.sin(i * 0.1))
ax = plt.gca()
ax.set_title('rustfisher.com mapplotlib example')
ax.set_xlabel('x')
ax.set_ylabel('y = sin(x)')
ax.grid()
plt.plot(x_list, y_list)
plt.show()
# plt.savefig('out.png')
def demo_subplot():
x_list = []
y_list = []
y2_list = []
for i in range(0, 365):
x_list.append(i)
y_list.append(math.sin(i * 0.1))
y2_list.append(math.cos(i * 0.1))
fig, (ax1, ax2) = plt.subplots(2)
ax1.set_title('rustfisher.com 1')
ax2.set_title('rustfisher.com 2')
ax1.set_xlabel('x')
ax1.set_ylabel('y = sin(x)')
ax2.set_xlabel('x')
ax2.set_ylabel('y = cos(x)')
ax1.plot(x_list, y_list)
ax2.plot(x_list, y2_list)
fig.tight_layout()
plt.show()
def demo3():
x_list = []
y_list = []
y2_list = []
for i in range(0, 365):
x_list.append(i)
y_list.append(math.sin(i * 0.1))
y2_list.append(math.cos(i * 0.1))
fig, (ax1, ax2) = plt.subplots(2)
ax1.set_title('rustfisher.com 1')
ax1.set_xlabel('x')
ax1.set_ylabel('y = sin(x)')
ax2.set_title('rustfisher.com 2')
ax2.set_xlabel('x')
ax2.set_ylabel('y = cos(x)')
ax1.set_xlim(left=50, right=200.6)
ax1.set_ylim(top=1, bottom=0.3)
ax2.set_xlim(xmin=1, xmax=156.6)
ax2.set_ylim(ymin=-0.3, ymax=0.3)
ax1.plot(x_list, y_list)
ax2.plot(x_list, y2_list)
ax1.set_xticks([50, 60, 70, 150])
ax1.set_yticks([0.1, 0.2, 0.3, 0.7, 0.9])
    ax1.grid()  # show the grid
ax2.set_xticks([1, 60, 70, 150], minor=True)
ax2.set_yticks([-0.1, 0, 0.1, 0.3], minor=True)
ax2.grid()
plt.setp(ax1.xaxis.get_majorticklabels(), rotation=-45)
plt.setp(ax2.xaxis.get_majorticklabels(), rotation=-60)
fig.tight_layout()
plt.show()
def demo_spine():
x_list = []
y_list = []
for i in range(0, 365):
x_list.append(i)
y_list.append(math.sin(i * 0.1))
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax_list = [ax1, ax2, ax3]
for i in range(0, 3):
cur_ax = ax_list[i]
cur_ax.set_title('rustfisher.com ' + str(i))
cur_ax.plot(x_list, y_list)
cur_ax.set_xlabel('x')
cur_ax.set_ylabel('y = sin(x)')
ax1.spines.right.set_visible(False)
ax1.spines.top.set_visible(False)
ax2.spines.bottom.set_visible(False)
ax2.spines.left.set_visible(False)
ax2.yaxis.set_ticks_position('right')
ax2.xaxis.set_ticks_position('top')
ax3.spines.left.set_bounds(-0.5, 0.5)
ax3.spines.top.set_bounds(340, 400)
ax3.spines.bottom.set_linewidth(2)
fig.tight_layout()
plt.show()
def demo_line():
x_list = []
y_list = []
y2_list = []
y3_list = []
for i in range(0, 20):
x_list.append(i)
y_list.append(math.sin(i) * 2 - 4)
y2_list.append(math.sin(i) * 2)
y3_list.append(math.cos(i) * 1.3 + 3)
plt.plot(x_list, y_list, color='blue', linestyle='-.', linewidth=2, markersize=4)
plt.plot(x_list, y2_list, 'go', linewidth=1)
plt.plot(x_list, y3_list, 'r+')
plt.legend(['math.sin(i) * 2 - 4', 'math.sin(i) * 2', 'math.cos(i) * 1.3 + 3'], bbox_to_anchor=(1, 1),
bbox_transform=plt.gcf().transFigure)
plt.show()
if __name__ == '__main__':
    print('rustfisher chart tutorial')
demo_line()
| 25.595588 | 106 | 0.587188 |
hexsha: d0e3cb36f8c6fc6c3df3ebd0f5a0a79c460796ad | size: 1,625 | ext: py | lang: Python
max_stars_repo_path: example.py | max_stars_repo_name: andrii-z4i/xmind-sdk-python | max_stars_repo_head_hexsha: d5114550da29b7be440e023125e0e12daab05f46 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | max_stars_repo_stars_event_min_datetime: 2018-04-09T10:08:32.000Z | max_stars_repo_stars_event_max_datetime: 2021-02-13T15:01:12.000Z
max_issues_repo_path: example.py | max_issues_repo_name: andrii-z4i/xmind-sdk-python | max_issues_repo_head_hexsha: d5114550da29b7be440e023125e0e12daab05f46 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 30 | max_issues_repo_issues_event_min_datetime: 2017-12-29T20:54:57.000Z | max_issues_repo_issues_event_max_datetime: 2021-03-28T21:26:53.000Z
max_forks_repo_path: example.py | max_forks_repo_name: andrii-z4i/xmind-sdk-python | max_forks_repo_head_hexsha: d5114550da29b7be440e023125e0e12daab05f46 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2020-10-23T13:19:01.000Z | max_forks_repo_forks_event_max_datetime: 2020-11-09T16:15:29.000Z

#-*- coding: utf-8 -*-
import xmind
from xmind.core import workbook,saver
from xmind.core.topic import TopicElement
w = xmind.load("test.xmind") # load an existing file or create a new workbook if nothing is found
s1=w.getPrimarySheet() # get the first sheet
s1.setTitle("first sheet") # set its title
r1=s1.getRootTopic() # get the root topic of this sheet
r1.setTitle("we don't care of this sheet") # set its title
s2=w.createSheet() # create a new sheet
s2.setTitle("second sheet")
r2=s2.getRootTopic()
r2.setTitle("root node")
t1=TopicElement(ownerWorkbook=w) # create a new element
t1.setTopicHyperlink(s1.getID()) # set a link from this topic to the first sheet given by s1.getID()
t1.setTitle("redirection to the first sheet") # set its title
t2=TopicElement(ownerWorkbook=w)
t2.setTitle("second node")
t2.setURLHyperlink("https://xmind.net") # set an hyperlink
t3=TopicElement(ownerWorkbook=w)
t3.setTitle("third node")
t3.setPlainNotes("notes for this topic") # set notes (F4 in XMind)
t3.setTitle("topic with \n notes")
t4=TopicElement(ownerWorkbook=w)
t4.setFileHyperlink("logo.jpeg") # set a file hyperlink
t4.setTitle("topic with a file")
# then the topics must be added to the root element
r2.addSubTopic(t1)
r2.addSubTopic(t2)
r2.addSubTopic(t3)
r2.addSubTopic(t4)
topics=r2.getSubTopics() # to loop on the subTopics
for topic in topics:
topic.addMarker("yes")
w.addSheet(s2) # the second sheet is now added to the workbook
rel=s2.createRelationship(t1.getID(),t2.getID(),"test") # create a relationship
s2.addRelationship(rel) # and add to the sheet
xmind.save(w, "test2.xmind")  # and we save
| 31.25 | 100 | 0.748923 |
hexsha: fcfe0471669f19c20befcafa9ec58af5df9671ad | size: 11,236 | ext: py | lang: Python
max_stars_repo_path: src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py | max_stars_repo_name: yhavinga/transformers | max_stars_repo_head_hexsha: 9932ee4b4bca9045d941af6687ef69eedcf68483 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-03-16T13:02:15.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-16T13:02:15.000Z
max_issues_repo_path: src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py | max_issues_repo_name: yhavinga/transformers | max_issues_repo_head_hexsha: 9932ee4b4bca9045d941af6687ef69eedcf68483 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2022-03-14T10:13:16.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-14T11:50:27.000Z
max_forks_repo_path: src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py | max_forks_repo_name: yhavinga/transformers | max_forks_repo_head_hexsha: 9932ee4b4bca9045d941af6687ef69eedcf68483 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2022-03-21T04:32:39.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-22T01:02:49.000Z

# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for Wav2Vec2
"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...file_utils import PaddingStrategy, TensorType
from ...utils import logging
logger = logging.get_logger(__name__)
class Wav2Vec2FeatureExtractor(SequenceFeatureExtractor):
r"""
Constructs a Wav2Vec2 feature extractor.
This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
most of the main methods. Users should refer to this superclass for more information regarding those methods.
Args:
feature_size (`int`, defaults to 1):
The feature dimension of the extracted features.
sampling_rate (`int`, defaults to 16000):
The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz).
padding_value (`float`, defaults to 0.0):
The value that is used to fill the padding values.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean unit-variance normalize the input. Normalizing can help to significantly
improve the performance for some models, *e.g.*,
[wav2vec2-lv60](https://huggingface.co/models?search=lv60).
return_attention_mask (`bool`, *optional*, defaults to `False`):
Whether or not [`~Wav2Vec2FeatureExtractor.__call__`] should return `attention_mask`.
<Tip>
Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as
[wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using
`attention_mask`. For such models, `input_values` should simply be padded with 0 and no `attention_mask`
should be passed.
For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as
[wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should be
passed for batched inference.
</Tip>"""
model_input_names = ["input_values", "attention_mask"]
def __init__(
self,
feature_size=1,
sampling_rate=16000,
padding_value=0.0,
return_attention_mask=False,
do_normalize=True,
**kwargs
):
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
self.return_attention_mask = return_attention_mask
self.do_normalize = do_normalize
@staticmethod
def zero_mean_unit_var_norm(
input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
) -> List[np.ndarray]:
"""
Every array in the list is normalized to have zero mean and unit variance
"""
if attention_mask is not None:
attention_mask = np.array(attention_mask, np.int32)
normed_input_values = []
for vector, length in zip(input_values, attention_mask.sum(-1)):
normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(normed_slice)
else:
normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def __call__(
self,
raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
padding: Union[bool, str, PaddingStrategy] = False,
max_length: Optional[int] = None,
truncation: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
sampling_rate: Optional[int] = None,
**kwargs
) -> BatchFeature:
"""
        Main method to featurize and prepare one or several sequence(s) for the model.
Args:
raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
values, a list of numpy arrays or a list of list of float values.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
>= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
Wav2Vec2 models that have set `config.feat_extract_norm == "group"`, such as
[wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base-960h), have **not** been trained using
`attention_mask`. For such models, `input_values` should simply be padded with 0 and no
`attention_mask` should be passed.
For Wav2Vec2 models that have set `config.feat_extract_norm == "layer"`, such as
[wav2vec2-lv60](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), `attention_mask` should
be passed for batched inference.
</Tip>
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
padding_value (`float`, defaults to 0.0):
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of {self.sampling_rate}. "
f"Please make sure that the provided `raw_speech` input was sampled with {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug."
)
is_batched = bool(
isinstance(raw_speech, (list, tuple))
and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list)))
)
# always return batch
if not is_batched:
raw_speech = [raw_speech]
# convert into correct format for padding
encoded_inputs = BatchFeature({"input_values": raw_speech})
padded_inputs = self.pad(
encoded_inputs,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
# convert input values to correct format
input_values = padded_inputs["input_values"]
if not isinstance(input_values[0], np.ndarray):
padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
elif (
not isinstance(input_values, np.ndarray)
and isinstance(input_values[0], np.ndarray)
and input_values[0].dtype is np.dtype(np.float64)
):
padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
padded_inputs["input_values"] = input_values.astype(np.float32)
# convert attention_mask to correct format
attention_mask = padded_inputs.get("attention_mask")
if attention_mask is not None:
padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
# zero-mean and unit-variance normalization
if self.do_normalize:
attention_mask = (
attention_mask
if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
else None
)
padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
)
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
| 47.210084 | 140 | 0.643734 |
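A minimal usage sketch for the feature extractor above; the checkpoint name is just a common public example, and the random arrays stand in for real 16 kHz audio.

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# Two mono clips of different lengths, assumed to be sampled at 16 kHz.
speech = [np.random.randn(16000).astype(np.float32),
          np.random.randn(8000).astype(np.float32)]

inputs = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
print(inputs["input_values"].shape)  # (2, 16000): the shorter clip is zero-padded
```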
hexsha: 81536ffbda91d5c74e3810053bf64c14192bcca0 | size: 10,947 | ext: py | lang: Python
max_stars_repo_path: pyfpm/pattern.py | max_stars_repo_name: martinblech/pyfpm | max_stars_repo_head_hexsha: 8351c3b7979260ddc423c2b42a100360e434c38a | max_stars_repo_licenses: ["MIT"] | max_stars_count: 25 | max_stars_repo_stars_event_min_datetime: 2015-01-06T15:14:51.000Z | max_stars_repo_stars_event_max_datetime: 2021-04-22T00:54:59.000Z
max_issues_repo_path: pyfpm/pattern.py | max_issues_repo_name: martinblech/pyfpm | max_issues_repo_head_hexsha: 8351c3b7979260ddc423c2b42a100360e434c38a | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2019-10-18T03:01:45.000Z | max_issues_repo_issues_event_max_datetime: 2019-10-18T03:01:45.000Z
max_forks_repo_path: pyfpm/pattern.py | max_forks_repo_name: martinblech/pyfpm | max_forks_repo_head_hexsha: 8351c3b7979260ddc423c2b42a100360e434c38a | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2015-09-18T10:03:22.000Z | max_forks_repo_forks_event_max_datetime: 2018-07-26T05:03:53.000Z

"""
This module holds the actual pattern implementations.
End users should not normally have to deal with it, except for constructing
patterns programmatically without making use of the pattern syntax parser.
"""
import re
try:
# python 2.x base string
_basestring = basestring
except NameError:
# python 3.x base string
_basestring = str
class Match(object):
"""
Represents the result of matching successfully a pattern against an
object. The `ctx` attribute is a :class:`dict` that contains the value for
each bound name in the pattern, if any.
"""
def __init__(self, ctx=None, value=None):
if ctx is None:
ctx = {}
self.ctx = ctx
self.value = value
def __eq__(self, other):
return (isinstance(other, Match) and
self.__dict__ == other.__dict__)
def __repr__(self):
return 'Match(%s)' % self.ctx
class Pattern(object):
"""
Base Pattern class. Abstracts the behavior common to all pattern types,
such as name bindings, conditionals and operator overloading for combining
several patterns.
"""
def __init__(self):
self.bound_name = None
self.condition = None
def match(self, other, ctx=None):
"""
Match this pattern against an object. Operator: `<<`.
:param other: the object this pattern should be matched against.
:param ctx: optional context. If none, an empty one will be
automatically created.
:type ctx: dict
:returns: a :class:`Match` if successful, `None` otherwise.
"""
match = self._does_match(other, ctx)
if match:
ctx = match.ctx
value = match.value or other
if self.bound_name:
if ctx is None:
ctx = {}
try:
previous = ctx[self.bound_name]
if previous != value:
return None
except KeyError:
ctx[self.bound_name] = value
if self.condition is None or self.condition(**ctx):
return Match(ctx)
return None
def __lshift__(self, other):
return self.match(other)
def bind(self, name):
"""Bind this pattern to the given name. Operator: `%`."""
self.bound_name = name
return self
def __mod__(self, name):
return self.bind(name)
def if_(self, condition):
"""
Add a boolean condition to this pattern. Operator: `/`.
:param condition: must accept the match context as keyword
arguments and return a boolean-ish value.
:type condition: callable
"""
self.condition = condition
return self
def __div__(self, condition):
return self.if_(condition)
def __truediv__(self, condition):
return self.if_(condition)
def multiply(self, n):
"""
Build a :class:`ListPattern` that matches `n` instances of this pattern.
Operator: `*`.
Example:
>>> p = EqualsPattern(1).multiply(3)
>>> p.match((1, 1, 1))
Match({})
"""
return build(*([self]*n))
def __mul__(self, length):
return self.multiply(length)
def __rmul__(self, length):
return self.multiply(length)
def or_with(self, other):
"""
Build a new :class:`OrPattern` with this or the other pattern.
Operator: `|`.
Example:
>>> p = EqualsPattern(1).or_with(InstanceOfPattern(str))
>>> p.match('hello')
Match({})
>>> p.match(1)
Match({})
>>> p.match(2)
"""
patterns = []
for pattern in (self, other):
if isinstance(pattern, OrPattern):
patterns.extend(pattern.patterns)
else:
patterns.append(pattern)
return OrPattern(*patterns)
def __or__(self, other):
return self.or_with(other)
def head_tail_with(self, other):
"""
Head-tail concatenate this pattern with the other. The lhs pattern will
be the head and the other will be the tail. Operator: `+`.
Example:
>>> p = InstanceOfPattern(int).head_tail_with(ListPattern())
>>> p.match([1])
Match({})
>>> p.match([1, 2])
"""
return ListPattern(self, other)
def __add__(self, other):
return self.head_tail_with(other)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.__dict__ == other.__dict__)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join('='.join((str(k), repr(v))) for (k, v) in
self.__dict__.items() if v))
class AnyPattern(Pattern):
"""Pattern that matches anything."""
def _does_match(self, other, ctx):
return Match(ctx)
class EqualsPattern(Pattern):
"""Pattern that only matches objects that equal the given object."""
def __init__(self, obj):
super(EqualsPattern, self).__init__()
self.obj = obj
def _does_match(self, other, ctx):
if self.obj == other:
return Match(ctx)
else:
return None
class InstanceOfPattern(Pattern):
"""Pattern that only matches instances of the given class."""
def __init__(self, cls):
super(InstanceOfPattern, self).__init__()
self.cls = cls
def _does_match(self, other, ctx):
if isinstance(other, self.cls):
return Match(ctx)
else:
return None
_CompiledRegex = type(re.compile(''))
class RegexPattern(Pattern):
"""Pattern that only matches strings that match the given regex."""
def __init__(self, regex):
super(RegexPattern, self).__init__()
if not isinstance(regex, _CompiledRegex):
regex = re.compile(regex)
self.regex = regex
def _does_match(self, other, ctx):
re_match = self.regex.match(other)
if re_match:
return Match(ctx, re_match.groups())
return None
class ListPattern(Pattern):
"""Pattern that only matches iterables whose head matches `head_pattern` and
whose tail matches `tail_pattern`"""
def __init__(self, head_pattern=None, tail_pattern=None):
super(ListPattern, self).__init__()
if head_pattern is not None and tail_pattern is None:
tail_pattern = ListPattern()
self.head_pattern = head_pattern
self.tail_pattern = tail_pattern
def head_tail_with(self, other):
return ListPattern(self.head_pattern,
self.tail_pattern.head_tail_with(other))
def _does_match(self, other, ctx):
try:
if (self.head_pattern is None and
self.tail_pattern is None and
len(other) == 0):
return Match(ctx)
except TypeError:
return None
if isinstance(other, _basestring):
return None
try:
head, tail = other[0], other[1:]
except (IndexError, TypeError):
return None
if self.head_pattern is not None:
match = self.head_pattern.match(head, ctx)
if match:
ctx = match.ctx
match = self.tail_pattern.match(tail, ctx)
if match:
ctx = match.ctx
else:
return None
else:
return None
else:
if len(other):
return None
return Match(ctx)
class NamedTuplePattern(Pattern):
"""Pattern that only matches named tuples of the given class and whose
contents match the given patterns."""
def __init__(self, casecls, *initpatterns):
super(NamedTuplePattern, self).__init__()
self.casecls_pattern = InstanceOfPattern(casecls)
if (len(initpatterns) == 1 and
isinstance(initpatterns[0], ListPattern)):
self.initargs_pattern = initpatterns[0]
else:
self.initargs_pattern = build(*initpatterns, **dict(is_list=True))
def _does_match(self, other, ctx):
match = self.casecls_pattern.match(other, ctx)
if not match:
return None
ctx = match.ctx
return self.initargs_pattern.match(other, ctx)
class OrPattern(Pattern):
"""Pattern that matches whenever any of the inner patterns match."""
def __init__(self, *patterns):
if len(patterns) < 2:
raise ValueError('need at least two patterns')
super(OrPattern, self).__init__()
self.patterns = patterns
def _does_match(self, other, ctx):
for pattern in self.patterns:
if ctx is not None:
ctx_ = ctx.copy()
else:
ctx_ = None
match = pattern.match(other, ctx_)
if match:
return match
return None
def build(*args, **kwargs):
"""
Shorthand pattern factory.
Examples:
>>> build() == AnyPattern()
True
>>> build(1) == EqualsPattern(1)
True
>>> build('abc') == EqualsPattern('abc')
True
>>> build(str) == InstanceOfPattern(str)
True
>>> build(re.compile('.*')) == RegexPattern('.*')
True
>>> build(()) == build([]) == ListPattern()
True
>>> build([1]) == build((1,)) == ListPattern(EqualsPattern(1),
... ListPattern())
True
>>> build(int, str, 'a') == ListPattern(InstanceOfPattern(int),
... ListPattern(InstanceOfPattern(str),
... ListPattern(EqualsPattern('a'))))
True
>>> try:
... from collections import namedtuple
... MyTuple = namedtuple('MyTuple', 'a b c')
... build(MyTuple(1, 2, 3)) == NamedTuplePattern(MyTuple, 1, 2, 3)
... except ImportError:
... True
True
"""
arglen = len(args)
if arglen > 1:
head, tail = args[0], args[1:]
return ListPattern(build(head), build(*tail, **(dict(is_list=True))))
if arglen == 0:
return AnyPattern()
(arg,) = args
if kwargs.get('is_list', False):
return ListPattern(build(arg))
if isinstance(arg, Pattern):
return arg
if isinstance(arg, _CompiledRegex):
return RegexPattern(arg)
if isinstance(arg, tuple) and hasattr(arg, '_fields'):
return NamedTuplePattern(arg.__class__, *map(build, arg))
if isinstance(arg, type):
return InstanceOfPattern(arg)
if isinstance(arg, (tuple, list)):
if len(arg) == 0:
return ListPattern()
return build(*arg, **(dict(is_list=True)))
return EqualsPattern(arg)
| 30.923729 | 80 | 0.566548 |
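A short illustration of the API above, using only what this file defines: the `build` factory, name binding with `%`, a guard with `/`, and head-tail composition with `+`.

```python
from pyfpm.pattern import build

# Bind any int to the name 'n', but only accept values greater than 10.
pattern = build(int) % 'n' / (lambda n: n > 10)
print(pattern.match(42).ctx)   # {'n': 42}
print(pattern.match(3))        # None: the guard rejects it

# Head-tail matching over a list; the first element is bound to 'head'.
head_tail = (build(int) % 'head') + build()
print(head_tail.match([1, 2, 3]).ctx)  # {'head': 1}
```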
hexsha: b43dabd546e2dfe9a577038cfd5aad99f4826480 | size: 486 | ext: py | lang: Python
max_stars_repo_path: output/models/ms_data/regex/re_dg5_xsd/re_dg5.py | max_stars_repo_name: tefra/xsdata-w3c-tests | max_stars_repo_head_hexsha: b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-08-14T17:59:21.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-14T17:59:21.000Z
max_issues_repo_path: output/models/ms_data/regex/re_dg5_xsd/re_dg5.py | max_issues_repo_name: tefra/xsdata-w3c-tests | max_issues_repo_head_hexsha: b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | max_issues_repo_licenses: ["MIT"] | max_issues_count: 4 | max_issues_repo_issues_event_min_datetime: 2020-02-12T21:30:44.000Z | max_issues_repo_issues_event_max_datetime: 2020-04-15T20:06:46.000Z
max_forks_repo_path: output/models/ms_data/regex/re_dg5_xsd/re_dg5.py | max_forks_repo_name: tefra/xsdata-w3c-tests | max_forks_repo_head_hexsha: b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"--0[123]\-(12|14)",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| 17.357143 | 44 | 0.50823 |
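These dataclasses are xsdata-generated bindings for a small regex-pattern test schema. A hedged sketch of parsing a matching document with xsdata's `XmlParser` follows; the XML string is made up to satisfy the `--0[123]\-(12|14)` pattern, and the import path simply mirrors the file path above.

```python
from xsdata.formats.dataclass.parsers import XmlParser

from output.models.ms_data.regex.re_dg5_xsd.re_dg5 import Doc  # the module above

xml = '<doc><elem att="--01-12"/></doc>'
doc = XmlParser().from_string(xml, Doc)
print(doc.elem[0].att)  # --01-12
```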
hexsha: d19c6e78dd238a277cab37575208c81b3b13faf5 | size: 14,205 | ext: py | lang: Python
max_stars_repo_path: ycmd_client.py | max_stars_repo_name: Chiel92/fateycmd | max_stars_repo_head_hexsha: 052fd7b2a352dc42cf55a33037024c410d3ac046 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2015-08-03T14:42:44.000Z | max_stars_repo_stars_event_max_datetime: 2015-08-03T16:19:39.000Z
max_issues_repo_path: ycmd_client.py | max_issues_repo_name: Chiel92/fateycmd | max_issues_repo_head_hexsha: 052fd7b2a352dc42cf55a33037024c410d3ac046 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ycmd_client.py | max_forks_repo_name: Chiel92/fateycmd | max_forks_repo_head_hexsha: 052fd7b2a352dc42cf55a33037024c410d3ac046 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

#!/usr/bin/env python
#
# Copyright (C) 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import debug, error
from base64 import b64encode, b64decode
import hashlib
import hmac
import json
import os
import socket
import subprocess
import tempfile
import urllib
import time
import requests
from enum import Enum
HMAC_HEADER = 'X-Ycm-Hmac'
HMAC_SECRET_LENGTH = 16
SERVER_IDLE_SUICIDE_SECONDS = 10800 # 3 hours
MAX_SERVER_WAIT_TIME_SECONDS = 5
# Set this to True to see ycmd's output interleaved with the client's
INCLUDE_YCMD_OUTPUT = False
DEFINED_SUBCOMMANDS_HANDLER = '/defined_subcommands'
CODE_COMPLETIONS_HANDLER = '/completions'
COMPLETER_COMMANDS_HANDLER = '/run_completer_command'
EVENT_HANDLER = '/event_notification'
EXTRA_CONF_HANDLER = '/load_extra_conf_file'
DIR_OF_THIS_SCRIPT = os.path.dirname(os.path.abspath(__file__))
PATH_TO_YCMD = os.path.join(DIR_OF_THIS_SCRIPT, 'ycmd', 'ycmd')
PATH_TO_EXTRA_CONF = os.path.join(DIR_OF_THIS_SCRIPT, 'ycmd', 'examples',
'.ycm_extra_conf.py')
class Event(Enum):
FileReadyToParse = 1
BufferUnload = 2
BufferVisit = 3
InsertLeave = 4
CurrentIdentifierFinished = 5
# Wrapper around ycmd's HTTP+JSON API
class YcmdHandle(object):
def __init__(self, popen_handle, port, hmac_secret):
self._popen_handle = popen_handle
self._port = port
self._hmac_secret = hmac_secret
self._server_location = 'http://127.0.0.1:' + str(port)
@classmethod
def StartYcmdAndReturnHandle(cls):
prepared_options = DefaultSettings()
hmac_secret = os.urandom(HMAC_SECRET_LENGTH)
b64_hmac_secret = b64encode(hmac_secret)
ascii_hmac_secret = b64_hmac_secret.decode('ascii')
prepared_options['hmac_secret'] = ascii_hmac_secret
# The temp options file is deleted by ycmd during startup
with tempfile.NamedTemporaryFile(delete=False) as options_file:
result = json.dumps(prepared_options).encode('utf-8')
options_file.write(result)
options_file.flush()
server_port = GetUnusedLocalhostPort()
ycmd_args = ['python',
PATH_TO_YCMD,
'--port={0}'.format(server_port),
'--options_file={0}'.format(options_file.name),
'--idle_suicide_seconds={0}'.format(
SERVER_IDLE_SUICIDE_SECONDS)]
std_handles = None if INCLUDE_YCMD_OUTPUT else subprocess.PIPE
child_handle = subprocess.Popen(ycmd_args,
stdout=std_handles,
stderr=std_handles)
return cls(child_handle, server_port, hmac_secret)
def IsAlive(self):
returncode = self._popen_handle.poll()
# When the process hasn't finished yet, poll() returns None.
return returncode is None
def IsReady(self, include_subservers=False):
if not self.IsAlive():
return False
params = {'include_subservers': 1} if include_subservers else None
try:
response = self.GetFromHandler('ready', params)
response.raise_for_status()
except requests.ConnectionError as e:
error(e)
return False
return response.json()
def Shutdown(self):
if self.IsAlive():
self._popen_handle.terminate()
def GetFromHandler(self, handler, params=None):
response = requests.get(self._BuildUri(handler),
headers=self._ExtraHeaders(),
params=params)
self._ValidateResponseObject(response)
return response
def PostToHandler(self, handler, data, params=None):
response = requests.post(self._BuildUri(handler),
json=data,
headers=self._ExtraHeaders(json.dumps(data)),
params=params)
self._ValidateResponseObject(response)
return response
#def SendDefinedSubcommandsRequest(self, completer_target):
#request_json = BuildRequestData(completer_target=completer_target)
#debug('==== Sending defined subcommands request ====')
#self.PostToHandlerAndLog(DEFINED_SUBCOMMANDS_HANDLER, request_json)
def SendCodeCompletionRequest(self, test_filename, filetype, line_num, column_num):
request_json = BuildRequestData(test_filename=test_filename,
filetype=filetype,
line_num=line_num,
column_num=column_num)
debug('==== Sending code-completion request ====')
response = self.PostToHandler(CODE_COMPLETIONS_HANDLER, request_json)
return json.loads(response.text)
#def SendGoToRequest(self, test_filename, filetype, line_num, column_num):
#request_json = BuildRequestData(test_filename=test_filename,
#command_arguments=['GoTo'],
#filetype=filetype,
#line_num=line_num,
#column_num=column_num)
#debug('==== Sending GoTo request ====')
#self.PostToHandlerAndLog(COMPLETER_COMMANDS_HANDLER, request_json)
def SendEventNotification(self, event_enum, test_filename, filetype,
line_num=1, # just placeholder values
column_num=1,
extra_data=None):
request_json = BuildRequestData(test_filename=test_filename,
filetype=filetype,
line_num=line_num,
column_num=column_num)
if extra_data:
request_json.update(extra_data)
request_json['event_name'] = event_enum.name
debug('==== Sending event notification ====')
response = self.PostToHandler(EVENT_HANDLER, request_json)
return json.loads(response.text)
def LoadExtraConfFile(self, extra_conf_filename):
request_json = {'filepath': extra_conf_filename}
self.PostToHandler(EXTRA_CONF_HANDLER, request_json)
def WaitUntilReady(self, include_subservers=False):
total_slept = 0
time.sleep(0.5)
total_slept += 0.5
while True:
try:
if total_slept > MAX_SERVER_WAIT_TIME_SECONDS:
raise RuntimeError(
'waited for the server for {0} seconds, aborting'.format(
MAX_SERVER_WAIT_TIME_SECONDS))
if self.IsReady(include_subservers):
return
except requests.exceptions.ConnectionError:
pass
finally:
time.sleep(0.1)
total_slept += 0.1
def _ExtraHeaders(self, request_body=None):
return {HMAC_HEADER: self._HmacForBody(request_body)}
def _HmacForBody(self, request_body=''):
result = CreateHexHmac(request_body, self._hmac_secret).encode('utf-8')
return b64encode(result).decode('utf-8')
def _BuildUri(self, handler):
return urllib.parse.urljoin(self._server_location, handler)
def _ValidateResponseObject(self, response):
if not ContentHexHmacValid(response.content,
b64decode(response.headers[HMAC_HEADER]),
self._hmac_secret):
raise RuntimeError('Received invalid HMAC for response!')
return True
def ContentHexHmacValid(content, hmac_value, hmac_secret):
return hmac.compare_digest(CreateHexHmac(content, hmac_secret).encode('utf-8'),
hmac_value)
def CreateHexHmac(content, hmac_secret):
"""Returns bytes object"""
# Must ensure that hmac_secret is str and not unicode
if type(content) == str:
content = content.encode('utf-8')
result = hmac.new(bytes(hmac_secret),
msg=content,
digestmod=hashlib.sha256)
return result.hexdigest()
def PathToTestFile(filename):
return os.path.join(DIR_OF_THIS_SCRIPT, 'ycmd', 'examples', 'samples', filename)
def DefaultSettings():
default_options_path = os.path.join(DIR_OF_THIS_SCRIPT, 'ycmd', 'ycmd',
'default_settings.json')
with open(default_options_path) as f:
return json.loads(f.read())
def GetUnusedLocalhostPort():
sock = socket.socket()
# This tells the OS to give us any free port in the range [1024 - 65535]
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
def PrettyPrintDict(value):
# Sad that this works better than pprint...
return json.dumps(value, sort_keys=True, indent=2).replace(
'\\n', '\n')
def BuildRequestData(test_filename=None,
filetype=None,
line_num=None,
column_num=None,
command_arguments=None,
completer_target=None):
test_path = PathToTestFile(test_filename) if test_filename else ''
# Normally, this would be the contents of the file as loaded in the editor
# (possibly unsaved data).
contents = open(test_path).read() if test_path else ''
data = {
'line_num': line_num,
'column_num': column_num,
'filepath': test_path,
'file_data': {
test_path: {
'filetypes': [filetype],
'contents': contents
}
}
}
if command_arguments:
data['command_arguments'] = command_arguments
if completer_target:
data['completer_target'] = completer_target
return data
def Main():
print('Trying to start server...')
server = YcmdHandle.StartYcmdAndReturnHandle()
server.WaitUntilReady()
LanguageAgnosticIdentifierCompletion(server)
# server.GetFromHandler()
# PythonSemanticCompletionResults(server)
# CppSemanticCompletionResults(server)
# CsharpSemanticCompletionResults(server)
# This will ask the server for a list of subcommands supported by a given
# language completer.
# PythonGetSupportedCommands(server)
# GoTo is an example of a completer subcommand.
# Python and C# completers also support the GoTo subcommand.
# CppGotoDeclaration(server)
print('Shutting down server...')
server.Shutdown()
def LanguageAgnosticIdentifierCompletion(server):
# We're using JavaScript here, but the language doesn't matter; the identifier
# completion engine just extracts identifiers.
# server.SendEventNotification(Event.FileReadyToParse,
# test_filename='some_javascript.js',
# filetype='javascript')
print(server.SendCodeCompletionRequest(test_filename='some_javascript.js',
filetype='javascript',
line_num=21,
column_num=6))
def PythonSemanticCompletionResults(server):
server.SendEventNotification(Event.FileReadyToParse,
test_filename='some_python.py',
filetype='python')
server.SendCodeCompletionRequest(test_filename='some_python.py',
filetype='python',
line_num=27,
column_num=6)
def CppSemanticCompletionResults(server):
server.LoadExtraConfFile(PATH_TO_EXTRA_CONF)
# NOTE: The server will return diagnostic information about an error in the
# some_cpp.cpp file that we placed there intentionally (as an example).
# Clang will recover from this error and still manage to parse the file
# though.
server.SendEventNotification(Event.FileReadyToParse,
test_filename='some_cpp.cpp',
filetype='cpp')
server.SendCodeCompletionRequest(test_filename='some_cpp.cpp',
filetype='cpp',
line_num=25,
column_num=7)
def PythonGetSupportedCommands(server):
server.SendDefinedSubcommandsRequest(completer_target='python')
def CppGotoDeclaration(server):
# NOTE: No need to load extra conf file or send FileReadyToParse event, it was
# already done in CppSemanticCompletionResults.
server.SendGoToRequest(test_filename='some_cpp.cpp',
filetype='cpp',
line_num=23,
column_num=4)
def CsharpSemanticCompletionResults(server):
# First such request starts the OmniSharpServer
server.SendEventNotification(Event.FileReadyToParse,
test_filename='some_csharp.cs',
filetype='cs')
# We have to wait until OmniSharpServer has started and loaded the solution
# file
debug('Waiting for OmniSharpServer to become ready...')
server.WaitUntilReady(include_subservers=True)
server.SendCodeCompletionRequest(test_filename='some_csharp.cs',
filetype='cs',
line_num=10,
column_num=15)
if __name__ == "__main__":
Main()
| 37.480211 | 87 | 0.611334 |
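The request signing used throughout this client boils down to the two module-level helpers above: the `X-Ycm-Hmac` header is the base64 of the hex HMAC of the request body. A tiny illustration with a throwaway secret, mirroring `_HmacForBody`:

```python
import os
from base64 import b64encode

from ycmd_client import CreateHexHmac, HMAC_HEADER, HMAC_SECRET_LENGTH

hmac_secret = os.urandom(HMAC_SECRET_LENGTH)      # throwaway secret for the demo
body = '{"filepath": "/tmp/example.py"}'

header_value = b64encode(CreateHexHmac(body, hmac_secret).encode('utf-8')).decode('utf-8')
print({HMAC_HEADER: header_value})
```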
hexsha: b1a74d2b005ac13e2c324ca74bccd1425ca1abfc | size: 4,934 | ext: py | lang: Python
max_stars_repo_path: var/spack/repos/builtin/packages/rocprofiler-dev/package.py | max_stars_repo_name: HigherOrderMethods/spack | max_stars_repo_head_hexsha: 87ed3fcc59fc25ce250042338d082925e3a3610b | max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2021-03-05T10:54:32.000Z | max_stars_repo_stars_event_max_datetime: 2021-03-05T14:14:52.000Z
max_issues_repo_path: var/spack/repos/builtin/packages/rocprofiler-dev/package.py | max_issues_repo_name: HigherOrderMethods/spack | max_issues_repo_head_hexsha: 87ed3fcc59fc25ce250042338d082925e3a3610b | max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | max_issues_count: 32 | max_issues_repo_issues_event_min_datetime: 2020-12-15T17:29:20.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-21T15:08:31.000Z
max_forks_repo_path: var/spack/repos/builtin/packages/rocprofiler-dev/package.py | max_forks_repo_name: HigherOrderMethods/spack | max_forks_repo_head_hexsha: 87ed3fcc59fc25ce250042338d082925e3a3610b | max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2021-07-19T20:31:27.000Z | max_forks_repo_forks_event_max_datetime: 2021-07-19T21:14:14.000Z

# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RocprofilerDev(CMakePackage):
"""ROCPROFILER library for AMD HSA runtime API extension support"""
homepage = "https://github.com/ROCm-Developer-Tools/rocprofiler"
git = "https://github.com/ROCm-Developer-Tools/rocprofiler.git"
url = "https://github.com/ROCm-Developer-Tools/rocprofiler/archive/rocm-4.2.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala']
version('4.2.0', sha256='c5888eda1404010f88219055778cfeb00d9c21901e172709708720008b1af80f')
version('4.1.0', sha256='2eead5707016da606d636b97f3af1c98cb471da78659067d5a77d4a2aa43ef4c')
version('4.0.0', sha256='e9960940d1ec925814a0e55ee31f5fc2fb23fa839d1c6a909f72dd83f657fb25')
version('3.10.0', sha256='fbf5ce9fbc13ba2b3f9489838e00b54885aba92336f055e8b03fef3e3347071e')
version('3.9.0', sha256='f07ddd9bf2f86550c8d243f887e9bde9d4f2ceec81ecc6393012aaf2a45999e8')
version('3.8.0', sha256='38ad3ac20f60f3290ce750c34f0aad442354b1d0a56b81167a018e44ecdf7fff')
version('3.7.0', sha256='d3f03bf850cbd86ca9dfe6e6cc6f559d8083b0f3ea4711d8260b232cb6fdd1cc')
version('3.5.0', sha256='c42548dd467b7138be94ad68c715254eb56a9d3b670ccf993c43cd4d43659937')
depends_on('cmake@3:', type='build')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0', '4.0.0', '4.1.0',
'4.2.0']:
depends_on('hsakmt-roct@' + ver, when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, when='@' + ver)
depends_on('rocminfo@' + ver, when='@' + ver)
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-3.5.0.tar.gz',
sha256='7af5326c9ca695642b4265232ec12864a61fd6b6056aa7c4ecd9e19c817f209e',
expand=True,
destination='',
placement='roctracer',
when='@3.5.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-3.7.0.tar.gz',
sha256='6fa5b771e990f09c242237ab334b9f01039ec7d54ccde993e719c5d6577d1518',
expand=True,
destination='',
placement='roctracer',
when='@3.7.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-3.8.0.tar.gz',
sha256='5154a84ce7568cd5dba756e9508c34ae9fc62f4b0b5731f93c2ad68b21537ed1',
expand=True,
destination='',
placement='roctracer',
when='@3.8.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-3.9.0.tar.gz',
sha256='0678f9faf45058b16923948c66d77ba2c072283c975d167899caef969169b292',
expand=True,
destination='',
placement='roctracer',
when='@3.9.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-3.10.0.tar.gz',
sha256='ac4a1d059fc34377e906071fd0e56f5434a7e0e4ded9db8faf9217a115239dec',
expand=True,
destination='',
placement='roctracer',
when='@3.10.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-4.0.0.tar.gz',
sha256='f47859a46173228b597c463eda850b870e810534af5efd5f2a746067ef04edee',
expand=True,
destination='',
placement='roctracer',
when='@4.0.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-4.1.0.tar.gz',
sha256='5d93de4e92895b6eb5f9d098f5dbd182d33923bd9b2ab69cf5a1abbf91d70695',
expand=True,
destination='',
placement='roctracer',
when='@4.1.0')
resource(name='roctracer-dev',
url='https://github.com/ROCm-Developer-Tools/roctracer/archive/rocm-4.2.0.tar.gz',
sha256='62a9c0cb1ba50b1c39a0636c886ac86e75a1a71cbf5fec05801517ceb0e67a37',
expand=True,
destination='',
placement='roctracer',
when='@4.2.0')
def patch(self):
filter_file('${HSA_RUNTIME_LIB_PATH}/../include',
'${HSA_RUNTIME_LIB_PATH}/../include ${HSA_KMT_LIB_PATH}/..\
/include', 'test/CMakeLists.txt', string=True)
def cmake_args(self):
args = ['-DPROF_API_HEADER_PATH={0}/roctracer/inc/ext'.format(
self.stage.source_path),
'-DROCM_ROOT_DIR:STRING={0}/include'.format(
self.spec['hsakmt-roct'].prefix)
]
return args
| 45.266055 | 96 | 0.637211 |
hexsha: 545d791a4b28bfd4fe8ff953a16084b1bb290abf | size: 1,283 | ext: py | lang: Python
max_stars_repo_path: Blog/blogForms.py | max_stars_repo_name: stevenpi/Link.Python.Django.DiyBlog | max_stars_repo_head_hexsha: b5899a9727ce68c069c2f121747b0e74747d005c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: Blog/blogForms.py | max_issues_repo_name: stevenpi/Link.Python.Django.DiyBlog | max_issues_repo_head_hexsha: b5899a9727ce68c069c2f121747b0e74747d005c | max_issues_repo_licenses: ["MIT"] | max_issues_count: 57 | max_issues_repo_issues_event_min_datetime: 2018-03-09T15:02:57.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-11T23:20:29.000Z
max_forks_repo_path: Blog/blogForms.py | max_forks_repo_name: stevenpi/Link.Python.Django.DiyBlog | max_forks_repo_head_hexsha: b5899a9727ce68c069c2f121747b0e74747d005c | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

from django import forms
from django.contrib.auth.models import User
from django.forms import TextInput, Textarea, FileInput
from django.utils.translation import ugettext_lazy as _
from markdownx.fields import MarkdownxFormField
from Blog.models import Post, Profile
class PostCreateForm(forms.ModelForm):
class Meta:
model = Post
fields = ['title', 'content']
widgets = {
'title': TextInput(),
'content': MarkdownxFormField(),
}
labels = {
'title': _('Title'),
# intentionally set empty, since the framework messes the label up with the markdown editor
'content': ''
}
class UpdateUserForm(forms.ModelForm):
class Meta:
model = User
fields = ['email', 'first_name', 'last_name']
labels = {
'email': _('Email'),
'first_name': _('First Name'),
'last_name': _('Last Name'),
}
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['bio', 'image']
labels = {
'bio': _('Bio'),
'image': _('Image'),
}
widgets = {
'image': FileInput(attrs={'data-role': 'file', 'dir': 'rtl'}),
}
| 24.207547 | 103 | 0.558067 |
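A hedged sketch of driving `PostCreateForm` from a view; the template name and redirect target are placeholders, and any `Post` fields not covered by the form (author, dates, ...) would be filled in before saving.

```python
from django.shortcuts import redirect, render

from Blog.blogForms import PostCreateForm


def create_post(request):
    form = PostCreateForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        post = form.save(commit=False)
        # e.g. post.author = request.user, before the final save.
        post.save()
        return redirect('/')
    return render(request, 'blog/post_form.html', {'form': form})
```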
hexsha: 855648bf998a6dd371725815622a112cc14e6e6b | size: 1,645 | ext: py | lang: Python
max_stars_repo_path: lib/python/treadmill/websocket/api/identity_group.py | max_stars_repo_name: bretttegart/treadmill | max_stars_repo_head_hexsha: 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2017-10-31T18:48:20.000Z | max_stars_repo_stars_event_max_datetime: 2018-03-04T20:35:20.000Z
max_issues_repo_path: lib/python/treadmill/websocket/api/identity_group.py | max_issues_repo_name: bretttegart/treadmill | max_issues_repo_head_hexsha: 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: lib/python/treadmill/websocket/api/identity_group.py | max_forks_repo_name: bretttegart/treadmill | max_forks_repo_head_hexsha: 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

"""A WebSocket handler for Treadmill state.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from treadmill import schema
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)
class IdentityGroupAPI(object):
"""Handler for /identity-groups topic.
"""
def __init__(self):
@schema.schema({'$ref': 'websocket/identity_group.json#/message'})
def subscribe(message):
"""Return filter based on message payload.
"""
identity_group = message.get('identity-group', '*')
return [('/'.join(['/identity-groups', identity_group]), '*')]
def on_event(filename, operation, content):
"""Event handler.
"""
if not filename.startswith('/identity-groups/'):
return
sow = operation is None
full_identity = filename[len('/identity-groups/'):]
identity_group, identity = full_identity.rsplit('/', 1)
message = {
'topic': '/identity-groups',
'identity-group': identity_group,
'identity': int(identity),
'app': None,
'host': None,
'sow': sow
}
if content:
message.update(yaml.load(content))
return message
self.subscribe = subscribe
self.on_event = on_event
def init():
"""API module init.
"""
return [('/identity-groups', IdentityGroupAPI(), [])]
| 26.532258 | 74 | 0.576292 |
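To make the handler's contract concrete, here is a small illustration of `on_event` with made-up values; it follows directly from the parsing logic above (filenames look like `/identity-groups/<group>/<identity>`, and the content is YAML).

```python
from treadmill.websocket.api.identity_group import IdentityGroupAPI

api = IdentityGroupAPI()
message = api.on_event(
    '/identity-groups/myproid.group/0',
    None,                                   # no operation -> reported as SOW
    'app: myproid.app#0000000123\nhost: host1.example.com',
)
print(message)
# {'topic': '/identity-groups', 'identity-group': 'myproid.group', 'identity': 0,
#  'app': 'myproid.app#0000000123', 'host': 'host1.example.com', 'sow': True}
```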
hexsha: 78a24203b6c398d043460466acfc5d35a525cc4d | size: 343 | ext: py | lang: Python
max_stars_repo_path: apk_bitminer/setup.py | max_stars_repo_name: linkedin/apk-bitminer | max_stars_repo_head_hexsha: 111752379e67f095f1f3ac2bc0bf2943ce36120e | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2018-08-15T06:49:40.000Z | max_stars_repo_stars_event_max_datetime: 2021-05-25T04:43:27.000Z
max_issues_repo_path: apk_bitminer/setup.py | max_issues_repo_name: linkedin/apk-bitminer | max_issues_repo_head_hexsha: 111752379e67f095f1f3ac2bc0bf2943ce36120e | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: apk_bitminer/setup.py | max_forks_repo_name: linkedin/apk-bitminer | max_forks_repo_head_hexsha: 111752379e67f095f1f3ac2bc0bf2943ce36120e | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2018-08-27T08:02:27.000Z | max_forks_repo_forks_event_max_datetime: 2020-04-21T00:50:53.000Z

import setuptools
setuptools.setup(
name='apk_bitminer',
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
include_package_data=True,
namespace_packages=[],
license='BSD 2-CLAUSE LICENSE',
version='1.0.6',
scripts=['src/apk_bitminer/pydexdump',
'src/apk_bitminer/pyaxmldump']
)
| 21.4375 | 45 | 0.661808 |
hexsha: b25b9900279f75a9b0b6aa96bdacc5c0f60930a1 | size: 126,765 | ext: py | lang: Python
max_stars_repo_path: atp_mens/data_2019_05.py | max_stars_repo_name: Tjorriemorrie/ufc | max_stars_repo_head_hexsha: 46918c91e1ccf464d9d03dc8524dab91eca239d2 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2019-11-10T14:14:42.000Z | max_stars_repo_stars_event_max_datetime: 2019-11-10T14:14:42.000Z
max_issues_repo_path: atp_mens/data_2019_05.py | max_issues_repo_name: Tjorriemorrie/ufc | max_issues_repo_head_hexsha: 46918c91e1ccf464d9d03dc8524dab91eca239d2 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2020-09-25T23:55:31.000Z | max_issues_repo_issues_event_max_datetime: 2022-02-10T00:20:20.000Z
max_forks_repo_path: atp_mens/data_2019_05.py | max_forks_repo_name: Tjorriemorrie/ufc | max_forks_repo_head_hexsha: 46918c91e1ccf464d9d03dc8524dab91eca239d2 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null

from men import *
from location import *
DATA_2019_05 = [
{
'location': MADRID,
'date': '2019-05-05',
'matches': [
# 2019-05-04
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
TARO_DANIEL
],
'score': [(3, 6), (6, 3), (7, 6)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.35,
TARO_DANIEL: 2.98
}
},
{
'round': 512,
'players': [
MARIUS_COPIL,
NICOLAS_JARRY
],
'score': [(5, 7), (6, 4), (7, 6)],
'odds': {
MARIUS_COPIL: 2.95,
NICOLAS_JARRY: 1.33
}
},
{
'round': 512,
'players': [
ROBERTO_CARBALLES_BAENA,
UGO_HUMBERT
],
'score': [(7, 6), (6, 1)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.25,
UGO_HUMBERT: 3.75
}
},
{
'round': 512,
'players': [
ROBIN_HAASE,
BERNARD_TOMIC
],
'score': [(6, 7), (6, 2), (7, 5)],
'odds': {
ROBIN_HAASE: 1.32,
BERNARD_TOMIC: 3.07
}
},
{
'round': 512,
'players': [
HUGO_DELLIEN,
LEONARDO_MAYER
],
'score': [(6, 2), (6, 4)],
'odds': {
HUGO_DELLIEN: 2.75,
LEONARDO_MAYER: 1.31
}
},
{
'round': 512,
'players': [
MARTIN_KLIZAN,
ERNESTS_GULBIS
],
'score': [(6, 4), (7, 6)],
'odds': {
MARTIN_KLIZAN: 1.38,
ERNESTS_GULBIS: 2.70
}
},
{
'round': 512,
'players': [
REILLY_OPELKA,
MISCHA_ZVEREV
],
'score': [(7, 6), (7, 6)],
'odds': {
REILLY_OPELKA: 1.21,
MISCHA_ZVEREV: 3.65
}
},
{
'round': 512,
'players': [
TAYLOR_FRITZ,
NICOLA_KUHN
],
'score': [(6, 4), (6, 3)],
'odds': {
TAYLOR_FRITZ: 1.65,
NICOLA_KUHN: 2.05
}
},
{
'round': 512,
'players': [
JUAN_IGNACIO_LONDERO,
MACKENZIE_MCDONALD
],
'score': [(6, 1), (6, 2)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.38,
MACKENZIE_MCDONALD: 2.80
}
},
{
'round': 512,
'players': [
ADRIAN_MANNARINO,
JORGE_PLANS
],
'score': [(6, 4), (6, 7), (7, 5)],
'odds': {
ADRIAN_MANNARINO: 1.01,
JORGE_PLANS: 19.85
}
},
{
'round': 512,
'players': [
GUIDO_ANDREOZZI,
DAMIR_DZUMHUR
],
'score': [(6, 3), (1, 6), (6, 3)],
'odds': {
GUIDO_ANDREOZZI: 1.37,
DAMIR_DZUMHUR: 2.88
}
},
{
'round': 512,
'players': [
HUBERT_HURKACZ,
MARCEL_GRANOLLERS
],
'score': [(6, 0), (6, 2)],
'odds': {
HUBERT_HURKACZ: 1.48,
MARCEL_GRANOLLERS: 2.50
}
},
{
'round': 512,
'players': [
CASPER_RUUD,
BENOIT_PAIRE
],
'score': [(6, 1), (6, 1)],
'odds': {
CASPER_RUUD: 2.04,
BENOIT_PAIRE: 1.59
}
},
{
'round': 512,
'players': [
PIERRE_HUGUES_HERBERT,
DENIS_KUDLA
],
'score': [(6, 3), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.27,
DENIS_KUDLA: 3.40
}
},
# 2019-05-05
{
'round': 256,
'players': [
ALBERT_RAMOS_VINOLAS,
CASPER_RUUD
],
'score': [(2, 6), (7, 6), (6, 1)],
'odds': {
ALBERT_RAMOS_VINOLAS: 2.00,
CASPER_RUUD: 1.71
}
},
{
'round': 256,
'players': [
HUGO_DELLIEN,
GUIDO_ANDREOZZI
],
'score': [(3, 6), (6, 4), (6, 2)],
'odds': {
HUGO_DELLIEN: 2.15,
GUIDO_ANDREOZZI: 1.63
}
},
{
'round': 256,
'players': [
REILLY_OPELKA,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 6), (5, 7), (6, 2)],
'odds': {
REILLY_OPELKA: 1.99,
JUAN_IGNACIO_LONDERO: 1.61
}
},
{
'round': 256,
'players': [
TAYLOR_FRITZ,
MARIUS_COPIL
],
'score': [(7, 6), (7, 5)],
'odds': {
TAYLOR_FRITZ: 1.41,
MARIUS_COPIL: 2.68
}
},
{
'round': 256,
'players': [
MARTIN_KLIZAN,
ADRIAN_MANNARINO
],
'score': [(6, 3), (6, 2)],
'odds': {
MARTIN_KLIZAN: 1.33,
ADRIAN_MANNARINO: 3.00
}
},
{
'round': 256,
'players': [
HUBERT_HURKACZ,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (6, 4)],
'odds': {
HUBERT_HURKACZ: 1.67,
ROBERTO_CARBALLES_BAENA: 2.05
}
},
{
'round': 256,
'players': [
PIERRE_HUGUES_HERBERT,
ROBIN_HAASE
],
'score': [(5, 7), (6, 3), (7, 6)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.67,
ROBIN_HAASE: 2.10
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
NICK_KYRGIOS
],
'score': [(7, 6), (6, 4)],
'odds': {
JAN_LENNARD_STRUFF: 1.99,
NICK_KYRGIOS: 1.83
}
},
{
'round': 64,
'players': [
FELIX_AUGER_ALIASSIME,
DENIS_SHAPOVALOV
],
'score': [(6, 2), (7, 6)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.69,
DENIS_SHAPOVALOV: 2.14
}
},
# 2019-05-06
{
'round': 64,
'players': [
FERNANDO_VERDASCO,
RADU_ALBOT
],
'score': [(6, 2), (3, 6), (6, 1)],
'odds': {
FERNANDO_VERDASCO: 1.51,
RADU_ALBOT: 2.40
}
},
{
'round': 64,
'players': [
REILLY_OPELKA,
PABLO_CARRENO_BUSTA
],
'score': [(7, 6), (6, 4)],
'odds': {
REILLY_OPELKA: 2.05,
PABLO_CARRENO_BUSTA: 1.74
}
},
{
'round': 64,
'players': [
PHILIPP_KOHLSCHREIBER,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 2), (7, 6)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.34,
MIKHAIL_KUKUSHKIN: 3.18
}
},
{
'round': 64,
'players': [
RICHARD_GASQUET,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(7, 5), (7, 6)],
'odds': {
RICHARD_GASQUET: 2.65,
ALEJANDRO_DAVIDOVICH_FOKINA: 1.36
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
GRIGOR_DIMITROV
],
'score': [(7, 6), (7, 6)],
'odds': {
TAYLOR_FRITZ: 2.70,
GRIGOR_DIMITROV: 1.33
}
},
{
'round': 64,
'players': [
LASLO_DJERE,
DUSAN_LAJOVIC
],
'score': [(6, 4), (6, 4)],
'odds': {
LASLO_DJERE: 2.10,
DUSAN_LAJOVIC: 1.67
}
},
{
'round': 64,
'players': [
GAEL_MONFILS,
ANDREAS_SEPPI
],
'score': [(6, 3), (6, 1)],
'odds': {
GAEL_MONFILS: 1.28,
ANDREAS_SEPPI: 3.80
}
},
{
'round': 64,
'players': [
FRANCES_TIAFOE,
NIKOLOZ_BASILASHVILI
],
'score': [(6, 7), (6, 3), (6, 4)],
'odds': {
FRANCES_TIAFOE: 1.91,
NIKOLOZ_BASILASHVILI: 1.80
}
},
{
'round': 64,
'players': [
KAREN_KHACHANOV,
JAUME_MUNAR
],
'score': [(6, 4), (6, 7), (6, 3)],
'odds': {
KAREN_KHACHANOV: 1.98,
JAUME_MUNAR: 1.80
}
},
{
'round': 64,
'players': [
MARIN_CILIC,
MARTIN_KLIZAN
],
'score': [(6, 4), (2, 6), (7, 6)],
'odds': {
MARIN_CILIC: 1.53,
MARTIN_KLIZAN: 2.40
}
},
# 2019-05-07
{
'round': 64,
'players': [
STAN_WAWRINKA,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 2), (6, 3)],
'odds': {
STAN_WAWRINKA: 1.44,
PIERRE_HUGUES_HERBERT: 2.66
}
},
{
'round': 64,
'players': [
JOHN_MILLMAN,
STEVE_JOHNSON
],
'score': [(7, 6), (7, 6)],
'odds': {
JOHN_MILLMAN: 1.69,
STEVE_JOHNSON: 2.12
}
},
{
'round': 64,
'players': [
ADRIAN_MANNARINO,
JOAO_SOUSA
],
'score': [(7, 5), (5, 7), (6, 1)],
'odds': {
ADRIAN_MANNARINO: 2.72,
JOAO_SOUSA: 1.45
}
},
{
'round': 64,
'players': [
HUBERT_HURKACZ,
ALEX_DE_MINAUR
],
'score': [(6, 3), (6, 4)],
'odds': {
HUBERT_HURKACZ: 1.35,
ALEX_DE_MINAUR: 3.19
}
},
{
'round': 64,
'players': [
MARTON_FUCSOVICS,
DAVID_GOFFIN
],
'score': [(6, 4), (7, 5)],
'odds': {
MARTON_FUCSOVICS: 2.30,
DAVID_GOFFIN: 1.59
}
},
{
'round': 64,
'players': [
DAVID_FERRER,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 4), (4, 6), (6, 4)],
'odds': {
DAVID_FERRER: 2.20,
ROBERTO_BAUTISTA_AGUT: 1.67
}
},
{
'round': 64,
'players': [
HUGO_DELLIEN,
GILLES_SIMON
],
'score': [(4, 6), (6, 1), (7, 6)],
'odds': {
HUGO_DELLIEN: 2.30,
GILLES_SIMON: 1.61
}
},
{
'round': 64,
'players': [
JEREMY_CHARDY,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 2), (4, 6), (7, 5)],
'odds': {
JEREMY_CHARDY: 2.65,
ALBERT_RAMOS_VINOLAS: 1.43
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
MARCO_CECCHINATO
],
'score': [(6, 0), (4, 6), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 1.93,
MARCO_CECCHINATO: 1.84
}
},
{
'round': 64,
'players': [
LUCAS_POUILLE,
BORNA_CORIC
],
'score': [(6, 3), (7, 5)],
'odds': {
LUCAS_POUILLE: 2.85,
BORNA_CORIC: 1.42
}
},
{
'round': 64,
'players': [
GUIDO_PELLA,
DANIIL_MEDVEDEV
],
'score': [(6, 2), (1, 6), (6, 3)],
'odds': {
GUIDO_PELLA: 2.40,
DANIIL_MEDVEDEV: 1.56
}
},
{
'round': 64,
'players': [
FABIO_FOGNINI,
KYLE_EDMUND
],
'score': [(6, 4), (6, 3)],
'odds': {
FABIO_FOGNINI: 1.80,
KYLE_EDMUND: 1.91
}
},
{
'round': 32,
'players': [
MARIN_CILIC,
JAN_LENNARD_STRUFF
],
'score': [(4, 6), (6, 3), (6, 4)],
'odds': {
MARIN_CILIC: 1.87,
JAN_LENNARD_STRUFF: 1.83
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
REILLY_OPELKA
],
'score': [(6, 7), (6, 3), (1, 0)],
'retired': True,
'odds': {
DOMINIC_THIEM: 1.13,
REILLY_OPELKA: 5.50
}
},
{
'round': 32,
'players': [
ROGER_FEDERER,
RICHARD_GASQUET
],
'score': [(6, 2), (6, 3)],
'odds': {
ROGER_FEDERER: 1.08,
RICHARD_GASQUET: 6.75
}
},
{
'round': 32,
'players': [
NOVAK_DJOKOVIC,
TAYLOR_FRITZ
],
'score': [(6, 4), (6, 2)],
'odds': {
NOVAK_DJOKOVIC: 1.09,
TAYLOR_FRITZ: 6.50
}
},
# 2019-05-08
{
'round': 32,
'players': [
STAN_WAWRINKA,
GUIDO_PELLA
],
'score': [(6, 3), (6, 4)],
'odds': {
STAN_WAWRINKA: 1.65,
GUIDO_PELLA: 2.25
}
},
{
'round': 32,
'players': [
FRANCES_TIAFOE,
PHILIPP_KOHLSCHREIBER
],
'score': [(6, 4), (3, 6), (6, 3)],
'odds': {
FRANCES_TIAFOE: 2.60,
PHILIPP_KOHLSCHREIBER: 1.50
}
},
{
'round': 32,
'players': [
HUBERT_HURKACZ,
LUCAS_POUILLE
],
'score': [(7, 5), (6, 1)],
'odds': {
HUBERT_HURKACZ: 1.70,
LUCAS_POUILLE: 2.10
}
},
{
'round': 32,
'players': [
JEREMY_CHARDY,
DIEGO_SCHWARTZMAN
],
'score': [(6, 1), (6, 2)],
'odds': {
JEREMY_CHARDY: 3.11,
DIEGO_SCHWARTZMAN: 1.34
}
},
{
'round': 32,
'players': [
GAEL_MONFILS,
MARTON_FUCSOVICS
],
'score': [(1, 6), (6, 4), (6, 2)],
'odds': {
GAEL_MONFILS: 1.53,
MARTON_FUCSOVICS: 2.50
}
},
{
'round': 32,
'players': [
FERNANDO_VERDASCO,
KAREN_KHACHANOV
],
'score': [(6, 7), (6, 1), (7, 5)],
'odds': {
FERNANDO_VERDASCO: 2.15,
KAREN_KHACHANOV: 1.69
}
},
{
'round': 32,
'players': [
FABIO_FOGNINI,
JOHN_MILLMAN
],
'score': [(6, 2), (6, 2)],
'odds': {
FABIO_FOGNINI: 1.36,
JOHN_MILLMAN: 3.09
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
ADRIAN_MANNARINO
],
'score': [(6, 2), (7, 5)],
'odds': {
STEFANOS_TSITSIPAS: 1.11,
ADRIAN_MANNARINO: 6.00
}
},
{
'round': 32,
'players': [
LASLO_DJERE,
JUAN_MARTIN_DEL_POTRO
],
'score': [(6, 3), (2, 6), (7, 5)],
'odds': {
LASLO_DJERE: 2.00,
JUAN_MARTIN_DEL_POTRO: 1.79
}
},
{
'round': 32,
'players': [
KEI_NISHIKORI,
HUGO_DELLIEN
],
'score': [(7, 5), (7, 5)],
'odds': {
KEI_NISHIKORI: 1.14,
HUGO_DELLIEN: 5.50
}
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
DAVID_FERRER
],
'score': [(6, 4), (6, 1)],
'odds': {
ALEXANDER_ZVEREV: 1.33,
DAVID_FERRER: 3.43
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 3), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.16,
FELIX_AUGER_ALIASSIME: 5.25
}
},
# 2019-05-09
{
'round': 16,
'players': [
MARIN_CILIC,
LASLO_DJERE
],
'score': [(4, 6), (6, 3), (6, 2)],
'odds': {
MARIN_CILIC: 1.63,
LASLO_DJERE: 2.15
}
},
{
'round': 16,
'players': [
STEFANOS_TSITSIPAS,
FERNANDO_VERDASCO
],
'score': [(6, 3), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.33,
FERNANDO_VERDASCO: 3.15
}
},
{
'round': 16,
'players': [
STAN_WAWRINKA,
KEI_NISHIKORI
],
'score': [(6, 3), (7, 6)],
'odds': {
STAN_WAWRINKA: 1.71,
KEI_NISHIKORI: 2.05
}
},
{
'round': 16,
'players': [
DOMINIC_THIEM,
FABIO_FOGNINI
],
'score': [(6, 4), (7, 5)],
'odds': {
DOMINIC_THIEM: 1.34,
FABIO_FOGNINI: 3.22
}
},
{
'round': 16,
'players': [
ROGER_FEDERER,
GAEL_MONFILS
],
'score': [(6, 0), (4, 6), (7, 6)],
'odds': {
ROGER_FEDERER: 1.29,
GAEL_MONFILS: 3.55
}
},
{
'round': 16,
'players': [
ALEXANDER_ZVEREV,
HUBERT_HURKACZ
],
'score': [(3, 6), (6, 4), (6, 4)],
'odds': {
ALEXANDER_ZVEREV: 1.44,
HUBERT_HURKACZ: 2.95
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
FRANCES_TIAFOE
],
'score': [(6, 3), (6, 4)],
'odds': {
RAFAEL_NADAL: 1.05,
FRANCES_TIAFOE: 9.00
}
},
{
'round': 16,
'players': [
NOVAK_DJOKOVIC,
JEREMY_CHARDY
],
'score': [(6, 1), (7, 6)],
'odds': {
NOVAK_DJOKOVIC: 1.07,
JEREMY_CHARDY: 7.00
}
},
# 2019-05-10
{
'round': 8,
'players': [
DOMINIC_THIEM,
ROGER_FEDERER
],
'score': [(3, 6), (7, 6), (6, 4)],
'odds': {
DOMINIC_THIEM: 1.43,
ROGER_FEDERER: 2.80
}
},
{
'round': 8,
'players': [
STEFANOS_TSITSIPAS,
ALEXANDER_ZVEREV
],
'score': [(7, 5), (3, 6), (6, 2)],
'odds': {
STEFANOS_TSITSIPAS: 1.77,
ALEXANDER_ZVEREV: 2.10
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
STAN_WAWRINKA
],
'score': [(6, 1), (6, 2)],
'odds': {
RAFAEL_NADAL: 1.20,
STAN_WAWRINKA: 4.60
}
},
{
'round': 8,
'players': [
NOVAK_DJOKOVIC,
MARIN_CILIC
],
'score': [],
'retired': True,
'odds': {
NOVAK_DJOKOVIC: 1.14,
MARIN_CILIC: 5.50
}
},
# 2019-05-11
{
'round': 4,
'players': [
STEFANOS_TSITSIPAS,
RAFAEL_NADAL
],
'score': [(6, 4), (2, 6), (6, 3)],
'odds': {
STEFANOS_TSITSIPAS: 5.50,
RAFAEL_NADAL: 1.12
}
},
{
'round': 4,
'players': [
NOVAK_DJOKOVIC,
DOMINIC_THIEM
],
'score': [(7, 6), (7, 6)],
'odds': {
NOVAK_DJOKOVIC: 1.74,
DOMINIC_THIEM: 1.95
}
},
# 2019-05-12
{
'round': 2,
'players': [
NOVAK_DJOKOVIC,
STEFANOS_TSITSIPAS
],
'score': [(6, 3), (6, 4)],
'odds': {
NOVAK_DJOKOVIC: 1.43,
STEFANOS_TSITSIPAS: 2.80
}
}
]
},
{
'location': ROME,
'date': '2019-05-12',
'matches': [
# 2019-05-11
{
'round': 512,
'players': [
BERNARD_TOMIC,
RICCARDO_BALZERANI
],
'score': [(6, 1), (6, 4)],
'odds': {
BERNARD_TOMIC: 1.19,
RICCARDO_BALZERANI: 4.43
}
},
{
'round': 512,
'players': [
NICOLAS_JARRY,
MARIUS_COPIL
],
'score': [(7, 6), (6, 2)],
'odds': {
NICOLAS_JARRY: 1.33,
MARIUS_COPIL: 3.20
}
},
{
'round': 512,
'players': [
CASPER_RUUD,
LORENZO_MUSETTI
],
'score': [(6, 1), (6, 4)],
'odds': {
CASPER_RUUD: 1.15,
LORENZO_MUSETTI: 5.18
}
},
{
'round': 512,
'players': [
YOSHIHITO_NISHIOKA,
DENIS_KUDLA
],
'score': [(7, 6), (4, 6), (6, 3)],
'odds': {
YOSHIHITO_NISHIOKA: 1.31,
DENIS_KUDLA: 3.35
}
},
{
'round': 512,
'players': [
DANIEL_EVANS,
ROBIN_HAASE
],
'score': [(6, 2), (6, 4)],
'odds': {
DANIEL_EVANS: 2.63,
ROBIN_HAASE: 1.37
}
},
{
'round': 512,
'players': [
FILLIPPO_BALDI,
MARTIN_KLIZAN
],
'score': [(6, 3), (6, 1)],
'odds': {
FILLIPPO_BALDI: 3.94,
MARTIN_KLIZAN: 1.22
}
},
{
'round': 512,
'players': [
REILLY_OPELKA,
ERNESTS_GULBIS
],
'score': [(7, 6), (6, 3)],
'odds': {
REILLY_OPELKA: 1.71,
ERNESTS_GULBIS: 1.90
}
},
{
'round': 512,
'players': [
TAYLOR_FRITZ,
JACOPO_BERRETTINI
],
'score': [(6, 4), (6, 3)],
'odds': {
TAYLOR_FRITZ: 1.09,
JACOPO_BERRETTINI: 6.25
}
},
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
JAUME_MUNAR
],
'score': [(6, 7), (6, 3), (7, 6)],
'odds': {
ALBERT_RAMOS_VINOLAS: 2.15,
JAUME_MUNAR: 1.61
}
},
{
'round': 512,
'players': [
DAMIR_DZUMHUR,
TARO_DANIEL
],
'score': [(6, 0), (6, 3)],
'odds': {
DAMIR_DZUMHUR: 2.40,
TARO_DANIEL: 1.56
}
},
{
'round': 512,
'players': [
MIOMIR_KECMANOVIC,
HUBERT_HURKACZ
],
'score': [(6, 3), (1, 6), (6, 4)],
'odds': {
MIOMIR_KECMANOVIC: 2.70,
HUBERT_HURKACZ: 1.43
}
},
{
'round': 512,
'players': [
CAMERON_NORRIE,
PETER_GOJOWCZYK
],
'score': [(6, 7), (7, 5), (6, 3)],
'odds': {
CAMERON_NORRIE: 1.51,
PETER_GOJOWCZYK: 2.35
}
},
{
'round': 512,
'players': [
BENOIT_PAIRE,
BRADLEY_KLAHN
],
'score': [(6, 4), (6, 2)],
'odds': {
BENOIT_PAIRE: 1.15,
BRADLEY_KLAHN: 5.20
}
},
{
'round': 512,
'players': [
DUSAN_LAJOVIC,
ALJAZ_BEDENE
],
'score': [(7, 6), (6, 7), (6, 1)],
'odds': {
DUSAN_LAJOVIC: 1.56,
ALJAZ_BEDENE: 2.15
}
},
# 2019-05-12
{
'round': 256,
'players': [
ALBERT_RAMOS_VINOLAS,
BERNARD_TOMIC
],
'score': [(7, 6), (7, 5)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.25,
BERNARD_TOMIC: 3.34
}
},
{
'round': 256,
'players': [
CASPER_RUUD,
MIOMIR_KECMANOVIC
],
'score': [(6, 2), (6, 1)],
'odds': {
CASPER_RUUD: 1.36,
MIOMIR_KECMANOVIC: 2.52
}
},
{
'round': 256,
'players': [
TAYLOR_FRITZ,
FILLIPPO_BALDI
],
'score': [(6, 4), (6, 3)],
'odds': {
TAYLOR_FRITZ: 1.41,
FILLIPPO_BALDI: 2.75
}
},
{
'round': 256,
'players': [
YOSHIHITO_NISHIOKA,
DAMIR_DZUMHUR
],
'score': [(6, 4), (4, 6), (6, 3)],
'odds': {
YOSHIHITO_NISHIOKA: 1.91,
DAMIR_DZUMHUR: 1.71
}
},
{
'round': 256,
'players': [
CAMERON_NORRIE,
NICOLAS_JARRY
],
'score': [(6, 3), (4, 6), (7, 6)],
'odds': {
CAMERON_NORRIE: 2.60,
NICOLAS_JARRY: 1.39
}
},
{
'round': 256,
'players': [
BENOIT_PAIRE,
REILLY_OPELKA
],
'score': [(5, 7), (6, 2), (7, 6)],
'odds': {
BENOIT_PAIRE: 1.54,
REILLY_OPELKA: 2.29
}
},
{
'round': 256,
'players': [
DANIEL_EVANS,
DUSAN_LAJOVIC
],
'score': [(7, 5), (6, 3)],
'odds': {
DANIEL_EVANS: 2.90,
DUSAN_LAJOVIC: 1.27
}
},
{
'round': 64,
'players': [
JANNIK_SINNER,
STEVE_JOHNSON
],
'score': [(1, 6), (6, 1), (7, 5)],
'odds': {
JANNIK_SINNER: 2.30,
STEVE_JOHNSON: 1.65
}
},
{
'round': 64,
'players': [
PHILIPP_KOHLSCHREIBER,
GILLES_SIMON
],
'score': [(6, 2), (3, 6), (6, 3)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.33,
GILLES_SIMON: 3.26
}
},
{
'round': 64,
'players': [
MATTEO_BERRETTINI,
LUCAS_POUILLE
],
'score': [(6, 2), (6, 4)],
'odds': {
MATTEO_BERRETTINI: 1.38,
LUCAS_POUILLE: 3.15
}
},
{
'round': 64,
'players': [
ROBERTO_BAUTISTA_AGUT,
ANDREAS_SEPPI
],
'score': [(6, 1), (3, 6), (6, 1)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.23,
ANDREAS_SEPPI: 4.17
}
},
{
'round': 64,
'players': [
NIKOLOZ_BASILASHVILI,
MARTON_FUCSOVICS
],
'score': [(6, 1), (7, 6)],
'odds': {
NIKOLOZ_BASILASHVILI: 2.60,
MARTON_FUCSOVICS: 1.52
}
},
# 2019-05-13
{
'round': 64,
'players': [
FERNANDO_VERDASCO,
KYLE_EDMUND
],
'score': [(4, 6), (6, 4), (6, 2)],
'odds': {
FERNANDO_VERDASCO: 2.20,
KYLE_EDMUND: 1.67
}
},
{
'round': 64,
'players': [
DENIS_SHAPOVALOV,
PABLO_CARRENO_BUSTA
],
'score': [(6, 3), (7, 6)],
'odds': {
DENIS_SHAPOVALOV: 1.67,
PABLO_CARRENO_BUSTA: 2.19
}
},
{
'round': 64,
'players': [
CASPER_RUUD,
DANIEL_EVANS
],
'score': [(7, 5), (0, 6), (6, 3)],
'odds': {
CASPER_RUUD: 1.42,
DANIEL_EVANS: 2.88
}
},
{
'round': 64,
'players': [
CAMERON_NORRIE,
JOHN_MILLMAN
],
'score': [(3, 6), (6, 3), (6, 4)],
'odds': {
CAMERON_NORRIE: 1.87,
JOHN_MILLMAN: 1.89
}
},
{
'round': 64,
'players': [
LASLO_DJERE,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 3), (6, 4)],
'odds': {
LASLO_DJERE: 1.37,
MIKHAIL_KUKUSHKIN: 2.90
}
},
{
'round': 64,
'players': [
MARCO_CECCHINATO,
ALEX_DE_MINAUR
],
'score': [(4, 6), (6, 3), (6, 1)],
'odds': {
MARCO_CECCHINATO: 1.25,
ALEX_DE_MINAUR: 4.04
}
},
{
'round': 64,
'players': [
ALBERT_RAMOS_VINOLAS,
GAEL_MONFILS
],
'score': [(6, 3), (6, 1)],
'odds': {
ALBERT_RAMOS_VINOLAS: 3.30,
GAEL_MONFILS: 1.36
}
},
{
'round': 64,
'players': [
BORNA_CORIC,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 7), (6, 3), (6, 4)],
'odds': {
BORNA_CORIC: 2.00,
FELIX_AUGER_ALIASSIME: 1.80
}
},
{
'round': 64,
'players': [
KAREN_KHACHANOV,
LORENZO_SONEGO
],
'score': [(6, 3), (6, 7), (6, 3)],
'odds': {
KAREN_KHACHANOV: 1.83,
LORENZO_SONEGO: 2.00
}
},
{
'round': 64,
'players': [
FABIO_FOGNINI,
JO_WILFRIED_TSONGA
],
'score': [(6, 3), (6, 4)],
'odds': {
FABIO_FOGNINI: 1.36,
JO_WILFRIED_TSONGA: 3.35
}
},
# 2019-05-14
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
GRIGOR_DIMITROV
],
'score': [(6, 4), (6, 7), (6, 3)],
'odds': {
JAN_LENNARD_STRUFF: 2.14,
GRIGOR_DIMITROV: 1.71
}
},
{
'round': 64,
'players': [
JOAO_SOUSA,
FRANCES_TIAFOE
],
'score': [(6, 3), (6, 7), (7, 6)],
'odds': {
JOAO_SOUSA: 2.70,
FRANCES_TIAFOE: 1.45
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
YOSHIHITO_NISHIOKA
],
'score': [(6, 1), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 1.43,
YOSHIHITO_NISHIOKA: 2.84
}
},
{
'round': 64,
'players': [
DAVID_GOFFIN,
STAN_WAWRINKA
],
'score': [(6, 1), (6, 4)],
'odds': {
DAVID_GOFFIN: 2.60,
STAN_WAWRINKA: 1.49
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
GUIDO_PELLA
],
'score': [(6, 3), (6, 4)],
'odds': {
TAYLOR_FRITZ: 3.15,
GUIDO_PELLA: 1.38
}
},
{
'round': 64,
'players': [
JEREMY_CHARDY,
RICHARD_GASQUET
],
'score': [(6, 1), (4, 6), (6, 3)],
# no odds
},
{
'round': 64,
'players': [
RADU_ALBOT,
BENOIT_PAIRE
],
'score': [(6, 3), (6, 2)],
'odds': {
RADU_ALBOT: 2.79,
BENOIT_PAIRE: 1.44
}
},
{
'round': 64,
'players': [
NICK_KYRGIOS,
DANIIL_MEDVEDEV
],
'score': [(6, 3), (3, 6), (6, 3)],
'odds': {
NICK_KYRGIOS: 2.99,
DANIIL_MEDVEDEV: 1.39
}
},
{
'round': 64,
'players': [
MARIN_CILIC,
ANDREA_BASSO
],
'score': [(6, 1), (7, 5)],
'odds': {
MARIN_CILIC: 1.07,
ANDREA_BASSO: 7.79
}
},
{
'round': 32,
'players': [
NIKOLOZ_BASILASHVILI,
LASLO_DJERE
],
'score': [(7, 5), (6, 4)],
'odds': {
NIKOLOZ_BASILASHVILI: 2.26,
LASLO_DJERE: 1.61
}
},
{
'round': 32,
'players': [
BORNA_CORIC,
CAMERON_NORRIE
],
'score': [(6, 2), (6, 2)],
'odds': {
BORNA_CORIC: 1.20,
CAMERON_NORRIE: 4.40
}
},
{
'round': 32,
'players': [
KAREN_KHACHANOV,
ROBERTO_BAUTISTA_AGUT
],
'score': [(5, 7), (6, 4), (6, 2)],
'odds': {
KAREN_KHACHANOV: 2.20,
ROBERTO_BAUTISTA_AGUT: 1.65
}
},
{
'round': 32,
'players': [
MATTEO_BERRETTINI,
ALEXANDER_ZVEREV
],
'score': [(7, 5), (7, 5)],
'odds': {
MATTEO_BERRETTINI: 2.31,
ALEXANDER_ZVEREV: 1.61
}
},
# 2019-05-16
{
'round': 32,
'players': [
DIEGO_SCHWARTZMAN,
ALBERT_RAMOS_VINOLAS
],
'score': [(7, 6), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 1.44,
ALBERT_RAMOS_VINOLAS: 2.80
}
},
{
'round': 32,
'players': [
CASPER_RUUD,
NICK_KYRGIOS
],
'score': [(6, 3), (6, 7), (2, 1)],
'def': True,
'odds': {
CASPER_RUUD: 3.10,
NICK_KYRGIOS: 1.36,
}
},
{
'round': 32,
'players': [
PHILIPP_KOHLSCHREIBER,
MARCO_CECCHINATO
],
'score': [(6, 3), (6, 3)],
'odds': {
PHILIPP_KOHLSCHREIBER: 2.17,
MARCO_CECCHINATO: 1.69
}
},
{
'round': 32,
'players': [
FABIO_FOGNINI,
RADU_ALBOT
],
'score': [(7, 6), (6, 3)],
'odds': {
FABIO_FOGNINI: 1.25,
RADU_ALBOT: 3.70
}
},
{
'round': 32,
'players': [
JAN_LENNARD_STRUFF,
MARIN_CILIC
],
'score': [(6, 2), (6, 3)],
'odds': {
JAN_LENNARD_STRUFF: 2.40,
MARIN_CILIC: 1.57
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
JANNIK_SINNER
],
'score': [(6, 3), (6, 2)],
'odds': {
STEFANOS_TSITSIPAS: 1.13,
JANNIK_SINNER: 5.50
}
},
{
'round': 32,
'players': [
JUAN_MARTIN_DEL_POTRO,
DAVID_GOFFIN
],
'score': [(6, 4), (6, 2)],
'odds': {
JUAN_MARTIN_DEL_POTRO: 2.35,
DAVID_GOFFIN: 1.56
}
},
{
'round': 32,
'players': [
KEI_NISHIKORI,
TAYLOR_FRITZ
],
'score': [(6, 2), (6, 4)],
'odds': {
KEI_NISHIKORI: 1.31,
TAYLOR_FRITZ: 3.45
}
},
{
'round': 32,
'players': [
FERNANDO_VERDASCO,
DOMINIC_THIEM
],
'score': [(4, 6), (6, 4), (7, 5)],
'odds': {
FERNANDO_VERDASCO: 4.90,
DOMINIC_THIEM: 1.16
}
},
{
'round': 32,
'players': [
ROGER_FEDERER,
JOAO_SOUSA
],
'score': [(6, 4), (6, 3)],
'odds': {
ROGER_FEDERER: 1.07,
JOAO_SOUSA: 8.50
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
JEREMY_CHARDY
],
'score': [(6, 0), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.03,
JEREMY_CHARDY: 11.00
}
},
{
'round': 32,
'players': [
NOVAK_DJOKOVIC,
DENIS_SHAPOVALOV
],
'score': [(6, 1), (6, 3)],
'odds': {
NOVAK_DJOKOVIC: 1.10,
DENIS_SHAPOVALOV: 7.00
}
},
{
'round': 16,
'players': [
DIEGO_SCHWARTZMAN,
MATTEO_BERRETTINI
],
'score': [(6, 3), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 2.37,
MATTEO_BERRETTINI: 1.54
}
},
{
'round': 16,
'players': [
FERNANDO_VERDASCO,
KAREN_KHACHANOV
],
'score': [(7, 5), (3, 6), (6, 3)],
'odds': {
FERNANDO_VERDASCO: 2.60,
KAREN_KHACHANOV: 2.60
}
},
{
'round': 16,
'players': [
STEFANOS_TSITSIPAS,
FABIO_FOGNINI
],
'score': [(6, 4), (6, 3)],
'odds': {
STEFANOS_TSITSIPAS: 1.43,
FABIO_FOGNINI: 2.45
}
},
{
'round': 16,
'players': [
JUAN_MARTIN_DEL_POTRO,
CASPER_RUUD
],
'score': [(6, 4), (6, 4)],
'odds': {
JUAN_MARTIN_DEL_POTRO: 1.36,
CASPER_RUUD: 3.10
}
},
{
'round': 16,
'players': [
KEI_NISHIKORI,
JAN_LENNARD_STRUFF
],
'score': [(3, 6), (7, 6), (6, 3)],
'odds': {
KEI_NISHIKORI: 1.50,
JAN_LENNARD_STRUFF: 2.65
}
},
{
'round': 16,
'players': [
ROGER_FEDERER,
BORNA_CORIC
],
'score': [(2, 6), (6, 4), (7, 6)],
'odds': {
ROGER_FEDERER: 1.50,
BORNA_CORIC: 2.45
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
NIKOLOZ_BASILASHVILI
],
'score': [(6, 1), (6, 0)],
'odds': {
RAFAEL_NADAL: 1.02,
NIKOLOZ_BASILASHVILI: 11.00
}
},
{
'round': 16,
'players': [
NOVAK_DJOKOVIC,
PHILIPP_KOHLSCHREIBER
],
'score': [(6, 3), (6, 0)],
'odds': {
NOVAK_DJOKOVIC: 1.08,
PHILIPP_KOHLSCHREIBER: 7.00
}
},
# 2019-05-17
{
'round': 8,
'players': [
DIEGO_SCHWARTZMAN,
KEI_NISHIKORI
],
'score': [(6, 4), (6, 2)],
'odds': {
DIEGO_SCHWARTZMAN: 2.30,
KEI_NISHIKORI: 1.57
}
},
{
'round': 8,
'players': [
STEFANOS_TSITSIPAS,
ROGER_FEDERER
],
'score': [],
'retired': True,
'odds': {
STEFANOS_TSITSIPAS: 1.69,
ROGER_FEDERER: 2.15
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
FERNANDO_VERDASCO
],
'score': [(6, 4), (6, 0)],
'odds': {
RAFAEL_NADAL: 1.03,
FERNANDO_VERDASCO: 12.00
}
},
{
'round': 8,
'players': [
NOVAK_DJOKOVIC,
JUAN_MARTIN_DEL_POTRO
],
'score': [(4, 6), (7, 6), (6, 4)],
'odds': {
NOVAK_DJOKOVIC: 1.12,
JUAN_MARTIN_DEL_POTRO: 6.62
}
},
# 2019-05-18
{
'round': 4,
'players': [
RAFAEL_NADAL,
STEFANOS_TSITSIPAS
],
'score': [(6, 3), (6, 4)],
'odds': {
RAFAEL_NADAL: 1.21,
STEFANOS_TSITSIPAS: 4.00
}
},
{
'round': 4,
'players': [
NOVAK_DJOKOVIC,
DIEGO_SCHWARTZMAN
],
'score': [(6, 3), (6, 7), (6, 3)],
'odds': {
NOVAK_DJOKOVIC: 1.12,
DIEGO_SCHWARTZMAN: 6.50
}
},
# 2019-05-19
{
'round': 2,
'players': [
RAFAEL_NADAL,
NOVAK_DJOKOVIC
],
'score': [(6, 0), (4, 6), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.47,
NOVAK_DJOKOVIC: 2.62
}
}
]
},
{
'location': GENEVA,
'date': '2019-05-19',
'matches': [
# 2019-05-18
{
'round': 512,
'players': [
BERNABE_ZAPATA_MIRALLES,
BRADLEY_KLAHN
],
'score': [(5, 7), (6, 4), (6, 2)],
'odds': {
BERNABE_ZAPATA_MIRALLES: 1.40,
BRADLEY_KLAHN: 2.75
}
},
{
'round': 512,
'players': [
KAICHI_UCHIDA,
RICARDAS_BERANKIS
],
'score': [(6, 4), (6, 4)],
'odds': {
KAICHI_UCHIDA: 3.78,
RICARDAS_BERANKIS: 1.25
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
DANIEL_MASUR
],
'score': [(4, 6), (6, 2), (6, 3)],
'odds': {
LORENZO_SONEGO: 1.20,
DANIEL_MASUR: 3.99
}
},
{
'round': 512,
'players': [
TOMMY_PAUL,
STEPHANE_ROBERT
],
'score': [(6, 4), (7, 5)],
'odds': {
TOMMY_PAUL: 1.18,
STEPHANE_ROBERT: 4.65
}
},
{
'round': 512,
'players': [
MIOMIR_KECMANOVIC,
ULISES_BLANCH
],
'score': [(4, 6), (6, 1), (6, 4)],
'odds': {
MIOMIR_KECMANOVIC: 1.21,
ULISES_BLANCH: 3.60
}
},
{
'round': 512,
'players': [
THOMAS_FABBIANO,
MARKO_MILADINOVIC
],
'score': [(6, 2), (6, 4)],
'odds': {
THOMAS_FABBIANO: 1.10,
MARKO_MILADINOVIC: 6.25
}
},
{
'round': 512,
'players': [
DAMIR_DZUMHUR,
JC_ARAGONE
],
'score': [(6, 3), (6, 4)],
'odds': {
DAMIR_DZUMHUR: 1.10,
JC_ARAGONE: 6.35
}
},
{
'round': 512,
'players': [
GRIGOR_DIMITROV,
MARC_ANDREA_HUESLER
],
'score': [(6, 4), (6, 3)],
'odds': {
GRIGOR_DIMITROV: 1.08,
MARC_ANDREA_HUESLER: 6.50
}
},
# 2019-05-19
{
'round': 256,
'players': [
BERNABE_ZAPATA_MIRALLES,
KAICHI_UCHIDA
],
'score': [(6, 1), (6, 1)],
'odds': {
BERNABE_ZAPATA_MIRALLES: 1.26,
KAICHI_UCHIDA: 3.47
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
MIOMIR_KECMANOVIC
],
'score': [(6, 4), (7, 6)],
'odds': {
LORENZO_SONEGO: 1.47,
MIOMIR_KECMANOVIC: 2.63
}
},
{
'round': 256,
'players': [
DAMIR_DZUMHUR,
TOMMY_PAUL
],
'score': [(6, 2), (4, 6), (6, 3)],
'odds': {
DAMIR_DZUMHUR: 1.65,
TOMMY_PAUL: 2.15
}
},
{
'round': 256,
'players': [
GRIGOR_DIMITROV,
THOMAS_FABBIANO
],
'score': [(7, 5), (6, 3)],
'odds': {
GRIGOR_DIMITROV: 1.18,
THOMAS_FABBIANO: 4.65
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
MISCHA_ZVEREV
],
'score': [(6, 4), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.25,
MISCHA_ZVEREV: 4.05
}
},
{
'round': 32,
'players': [
ERNESTS_GULBIS,
YOSHIHITO_NISHIOKA
],
'score': [(6, 2), (7, 6)],
'odds': {
ERNESTS_GULBIS: 2.51,
YOSHIHITO_NISHIOKA: 1.53
}
},
# 2019-05-20
{
'round': 32,
'players': [
JANKO_TIPSAREVIC,
PETER_GOJOWCZYK
],
'score': [(7, 5), (7, 5)],
'odds': {
JANKO_TIPSAREVIC: 2.30,
PETER_GOJOWCZYK: 1.56
}
},
{
'round': 32,
'players': [
DENIS_KUDLA,
JORDAN_THOMPSON
],
'score': [(5, 7), (6, 2), (6, 4)],
'odds': {
DENIS_KUDLA: 2.30,
JORDAN_THOMPSON: 1.65
}
},
{
'round': 32,
'players': [
TARO_DANIEL,
BERNABE_ZAPATA_MIRALLES
],
'score': [(6, 4), (4, 6), (6, 3)],
'odds': {
TARO_DANIEL: 1.61,
BERNABE_ZAPATA_MIRALLES: 2.25
}
},
{
'round': 32,
'players': [
HUGO_DELLIEN,
ANDREAS_SEPPI
],
'score': [(6, 1), (3, 6), (6, 4)],
'odds': {
HUGO_DELLIEN: 1.79,
ANDREAS_SEPPI: 1.95
}
},
{
'round': 32,
'players': [
NICOLAS_JARRY,
MATTHEW_EBDEN
],
'score': [(6, 2), (7, 6)],
'odds': {
NICOLAS_JARRY: 1.12,
MATTHEW_EBDEN: 6.25
}
},
{
'round': 32,
'players': [
RADU_ALBOT,
LORENZO_SONEGO
],
'score': [(7, 6), (7, 6)],
'odds': {
RADU_ALBOT: 2.40,
LORENZO_SONEGO: 1.56
}
},
# 2019-05-21
{
'round': 32,
'players': [
JOAO_SOUSA,
LEONARDO_MAYER
],
'score': [(6, 2), (6, 7), (6, 4)],
'odds': {
JOAO_SOUSA: 1.98,
LEONARDO_MAYER: 1.74
}
},
{
'round': 32,
'players': [
DAMIR_DZUMHUR,
FELICIANO_LOPEZ
],
'score': [(6, 7), (6, 4), (7, 5)],
'odds': {
DAMIR_DZUMHUR: 1.49,
FELICIANO_LOPEZ: 2.60
}
},
{
'round': 32,
'players': [
FEDERICO_DELBONIS,
GRIGOR_DIMITROV
],
'score': [(6, 7), (6, 3), (6, 2)],
'odds': {
FEDERICO_DELBONIS: 2.70,
GRIGOR_DIMITROV: 1.44
}
},
{
'round': 16,
'players': [
HUGO_DELLIEN,
JANKO_TIPSAREVIC
],
'score': [(7, 6), (6, 3)],
'odds': {
HUGO_DELLIEN: 1.44,
JANKO_TIPSAREVIC: 2.75
}
},
{
'round': 16,
'players': [
ALEXANDER_ZVEREV,
ERNESTS_GULBIS
],
'score': [(7, 6), (6, 3)],
'odds': {
ALEXANDER_ZVEREV: 1.15,
ERNESTS_GULBIS: 5.50
}
},
# 2019-05-22
{
'round': 16,
'players': [
ALBERT_RAMOS_VINOLAS,
JOAO_SOUSA
],
'score': [(6, 0), (6, 3)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.63,
JOAO_SOUSA: 2.21
}
},
{
'round': 16,
'players': [
NICOLAS_JARRY,
DENIS_KUDLA
],
'score': [(6, 3), (6, 3)],
'odds': {
NICOLAS_JARRY: 1.40,
DENIS_KUDLA: 3.00
}
},
{
'round': 16,
'players': [
RADU_ALBOT,
JUAN_IGNACIO_LONDERO
],
'score': [(6, 1), (6, 7), (6, 4)],
'odds': {
RADU_ALBOT: 1.79,
JUAN_IGNACIO_LONDERO: 1.95
}
},
{
'round': 16,
'players': [
FEDERICO_DELBONIS,
MARTON_FUCSOVICS
],
'score': [(6, 4), (6, 2)],
'odds': {
FEDERICO_DELBONIS: 1.95,
MARTON_FUCSOVICS: 1.80
}
},
{
'round': 16,
'players': [
TARO_DANIEL,
CHRISTIAN_GARIN
],
'score': [(6, 2), (4, 6), (6, 4)],
'odds': {
TARO_DANIEL: 3.25,
CHRISTIAN_GARIN: 1.31
}
},
# 2019-05-23
{
'round': 8,
'players': [
NICOLAS_JARRY,
TARO_DANIEL
],
'score': [(6, 1), (7, 5)],
'odds': {
NICOLAS_JARRY: 1.46,
TARO_DANIEL: 2.87
}
},
{
'round': 8,
'players': [
FEDERICO_DELBONIS,
ALBERT_RAMOS_VINOLAS
],
'score': [(7, 6), (7, 5)],
'odds': {
FEDERICO_DELBONIS: 1.67,
ALBERT_RAMOS_VINOLAS: 2.20
}
},
{
'round': 8,
'players': [
RADU_ALBOT,
DAMIR_DZUMHUR
],
'score': [(6, 3), (7, 5)],
'odds': {
RADU_ALBOT: 1.83,
DAMIR_DZUMHUR: 1.87
}
},
{
'round': 8,
'players': [
ALEXANDER_ZVEREV,
HUGO_DELLIEN
],
'score': [(7, 5), (3, 6), (6, 3)],
'odds': {
ALEXANDER_ZVEREV: 1.19,
HUGO_DELLIEN: 5.37
}
},
# 2019-05-24
{
'round': 4,
'players': [
NICOLAS_JARRY,
RADU_ALBOT
],
'score': [(6, 3), (6, 4)],
'odds': {
NICOLAS_JARRY: 1.74,
RADU_ALBOT: 1.97
}
},
{
'round': 4,
'players': [
ALEXANDER_ZVEREV,
FEDERICO_DELBONIS
],
'score': [(7, 5), (6, 7), (6, 3)],
'odds': {
ALEXANDER_ZVEREV: 1.45,
FEDERICO_DELBONIS: 2.86
}
},
# 2019-05-25
{
'round': 2,
'players': [
ALEXANDER_ZVEREV,
NICOLAS_JARRY
],
'score': [(6, 3), (3, 6), (7, 6)],
'odds': {
ALEXANDER_ZVEREV: 1.47,
NICOLAS_JARRY: 2.82
}
}
]
},
{
'location': LYON,
'date': '2019-05-19',
'matches': [
# 2019-05-18
{
'round': 512,
'players': [
MAXIME_JANVIER,
GUILHERME_CLEZAR
],
'score': [(7, 5), (4, 6), (6, 4)],
'odds': {
MAXIME_JANVIER: 1.48,
GUILHERME_CLEZAR: 2.60
}
},
{
'round': 512,
'players': [
QUENTIN_HALYS,
MARIO_VILELLA_MARTINEZ
],
'score': [(7, 6), (6, 2)],
'odds': {
QUENTIN_HALYS: 1.34,
MARIO_VILELLA_MARTINEZ: 2.90
}
},
{
'round': 512,
'players': [
JANNIK_SINNER,
ANTOINE_HOANG
],
'score': [(6, 4), (5, 7), (6, 1)],
'odds': {
JANNIK_SINNER: 1.74,
ANTOINE_HOANG: 1.95
}
},
{
'round': 512,
'players': [
GREGOIRE_BARRERE,
HUGO_GRENIER
],
'score': [(6, 7), (6, 4), (7, 6)],
'odds': {
GREGOIRE_BARRERE: 1.17,
HUGO_GRENIER: 5.00
}
},
{
'round': 512,
'players': [
STEVEN_DIEZ,
ALEXEI_POPYRIN
],
'score': [(6, 4), (3, 6), (7, 5)],
'odds': {
STEVEN_DIEZ: 2.40,
ALEXEI_POPYRIN: 1.48
}
},
{
'round': 512,
'players': [
JIRI_VESELY,
NICOLA_KUHN
],
'score': [(6, 4), (6, 4)],
'odds': {
JIRI_VESELY: 1.65,
NICOLA_KUHN: 2.13
}
},
{
'round': 512,
'players': [
TRISTAN_LAMASINE,
ALEXANDER_BUBLIK
],
'score': [(6, 2), (2, 6), (7, 5)],
'odds': {
TRISTAN_LAMASINE: 1.78,
ALEXANDER_BUBLIK: 1.74
}
},
{
'round': 512,
'players': [
LLOYD_HARRIS,
KENNY_DE_SCHEPPER
],
'score': [(3, 6), (6, 3), (6, 2)],
'odds': {
LLOYD_HARRIS: 1.19,
KENNY_DE_SCHEPPER: 4.40
}
},
# 2019-05-19
{
'round': 256,
'players': [
JANNIK_SINNER,
TRISTAN_LAMASINE
],
'score': [(6, 4), (6, 3)],
'odds': {
JANNIK_SINNER: 1.48,
TRISTAN_LAMASINE: 2.45
}
},
{
'round': 256,
'players': [
STEVEN_DIEZ,
QUENTIN_HALYS
],
'score': [(6, 2), (3, 6), (6, 3)],
'odds': {
STEVEN_DIEZ: 2.90,
QUENTIN_HALYS: 1.33
}
},
{
'round': 256,
'players': [
JIRI_VESELY,
GREGOIRE_BARRERE
],
'score': [(3, 6), (6, 3), (6, 2)],
'odds': {
JIRI_VESELY: 1.48,
GREGOIRE_BARRERE: 2.50
}
},
{
'round': 256,
'players': [
MAXIME_JANVIER,
LLOYD_HARRIS
],
'score': [(6, 1), (6, 4)],
'odds': {
MAXIME_JANVIER: 2.05,
LLOYD_HARRIS: 1.69
}
},
{
'round': 32,
'players': [
JOHN_MILLMAN,
PABLO_ANDUJAR
],
'score': [(6, 1), (6, 3)],
'odds': {
JOHN_MILLMAN: 2.04,
PABLO_ANDUJAR: 1.72
}
},
{
'round': 32,
'players': [
JO_WILFRIED_TSONGA,
DUSAN_LAJOVIC
],
'score': [(7, 6), (6, 4)],
'odds': {
JO_WILFRIED_TSONGA: 1.60,
DUSAN_LAJOVIC: 2.30
}
},
# 2019-05-20
{
'round': 32,
'players': [
TRISTAN_LAMASINE,
JANNIK_SINNER
],
'score': [(6, 0), (7, 6)],
},
{
'round': 32,
'players': [
STEVE_JOHNSON,
LLOYD_HARRIS
],
'score': [(6, 2), (7, 6)],
},
{
'round': 32,
'players': [
UGO_HUMBERT,
CAMERON_NORRIE
],
'score': [(6, 1), (6, 3)],
'odds': {
UGO_HUMBERT: 2.50,
CAMERON_NORRIE: 1.49
}
},
{
'round': 32,
'players': [
TAYLOR_FRITZ,
JIRI_VESELY
],
'score': [(7, 5), (7, 6)],
'odds': {
TAYLOR_FRITZ: 1.67,
JIRI_VESELY: 2.15
}
},
{
'round': 32,
'players': [
STEVEN_DIEZ,
BERNARD_TOMIC
],
'score': [(6, 4), (4, 1)],
'retired': True,
'odds': {
STEVEN_DIEZ: 1.74,
BERNARD_TOMIC: 2.00
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
JEREMY_CHARDY
],
'score': [(6, 7), (6, 2), (7, 6)],
'odds': {
PIERRE_HUGUES_HERBERT: 2.05,
JEREMY_CHARDY: 1.71
}
},
# 2019-05-21
{
'round': 32,
'players': [
BENOIT_PAIRE,
MACKENZIE_MCDONALD
],
'score': [(3, 6), (7, 6), (6, 1)],
'odds': {
BENOIT_PAIRE: 1.33,
MACKENZIE_MCDONALD: 3.09
}
},
{
'round': 32,
'players': [
CORENTIN_MOUTET,
REILLY_OPELKA
],
'score': [(6, 3), (2, 6), (7, 6)],
'odds': {
CORENTIN_MOUTET: 2.51,
REILLY_OPELKA: 1.50
}
},
{
'round': 32,
'players': [
PABLO_CUEVAS,
HUBERT_HURKACZ
],
'score': [(6, 4), (6, 4)],
'odds': {
PABLO_CUEVAS: 1.71,
HUBERT_HURKACZ: 2.05
}
},
{
'round': 16,
'players': [
STEVE_JOHNSON,
PIERRE_HUGUES_HERBERT
],
'score': [(7, 6), (5, 7), (6, 1)],
'odds': {
STEVE_JOHNSON: 2.55,
PIERRE_HUGUES_HERBERT: 1.51
}
},
{
'round': 16,
'players': [
FELIX_AUGER_ALIASSIME,
JOHN_MILLMAN
],
'score': [(7, 6), (7, 5)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.43,
JOHN_MILLMAN: 2.55
}
},
# 2019-05-22
{
'round': 16,
'players': [
JO_WILFRIED_TSONGA,
STEVEN_DIEZ
],
'score': [(3, 6), (7, 6), (6, 3)],
'odds': {
JO_WILFRIED_TSONGA: 1.20,
STEVEN_DIEZ: 4.50
}
},
{
'round': 16,
'players': [
BENOIT_PAIRE,
PABLO_CUEVAS
],
'score': [(6, 4), (6, 3)],
'odds': {
BENOIT_PAIRE: 2.65,
PABLO_CUEVAS: 1.48
}
},
{
'round': 16,
'players': [
TAYLOR_FRITZ,
RICHARD_GASQUET
],
'score': [],
'retired': True,
'odds': {
TAYLOR_FRITZ: 1.54,
RICHARD_GASQUET: 2.30
}
},
{
'round': 16,
'players': [
DENIS_SHAPOVALOV,
UGO_HUMBERT
],
'score': [(2, 6), (7, 6), (6, 2)],
'odds': {
DENIS_SHAPOVALOV: 1.47,
UGO_HUMBERT: 2.60
}
},
{
'round': 16,
'players': [
ROBERTO_BAUTISTA_AGUT,
CORENTIN_MOUTET
],
'score': [(4, 6), (6, 4), (6, 3)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.22,
CORENTIN_MOUTET: 4.15
}
},
{
'round': 16,
'players': [
NIKOLOZ_BASILASHVILI,
TRISTAN_LAMASINE
],
'score': [(7, 5), (7, 5)],
'odds': {
NIKOLOZ_BASILASHVILI: 1.25,
TRISTAN_LAMASINE: 3.85
}
},
# 2019-05-23
{
'round': 8,
'players': [
FELIX_AUGER_ALIASSIME,
STEVE_JOHNSON
],
'score': [(6, 4), (2, 6), (6, 4)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.33,
STEVE_JOHNSON: 3.30
}
},
{
'round': 8,
'players': [
BENOIT_PAIRE,
DENIS_SHAPOVALOV
],
'score': [(6, 3), (4, 6), (7, 6)],
'odds': {
BENOIT_PAIRE: 1.80,
DENIS_SHAPOVALOV: 2.00
}
},
{
'round': 8,
'players': [
TAYLOR_FRITZ,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 7), (6, 3), (6, 4)],
'odds': {
TAYLOR_FRITZ: 2.40,
ROBERTO_BAUTISTA_AGUT: 1.56
}
},
{
'round': 8,
'players': [
NIKOLOZ_BASILASHVILI,
JO_WILFRIED_TSONGA
],
'score': [(6, 4), (6, 4)],
'odds': {
NIKOLOZ_BASILASHVILI: 2.22,
JO_WILFRIED_TSONGA: 1.65
}
},
# 2019-05-24
{
'round': 4,
'players': [
BENOIT_PAIRE,
TAYLOR_FRITZ
],
'score': [(6, 4), (6, 2)],
'odds': {
BENOIT_PAIRE: 2.04,
TAYLOR_FRITZ: 1.71
}
},
{
'round': 4,
'players': [
FELIX_AUGER_ALIASSIME,
NIKOLOZ_BASILASHVILI
],
'score': [(2, 6), (7, 6), (6, 4)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.64,
NIKOLOZ_BASILASHVILI: 2.15
}
},
# 2019-05-25
{
'round': 2,
'players': [
BENOIT_PAIRE,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 4), (6, 3)],
'odds': {
BENOIT_PAIRE: 1.95,
FELIX_AUGER_ALIASSIME: 1.80
}
}
]
},
{
'location': ROLAND_GARROS,
'date': '2019-05-26',
'matches': [
{
'round': 128,
'players': [
CASPER_RUUD,
ERNESTS_GULBIS
],
'score': [(6, 2), (7, 6), (6, 0)],
'odds': {
CASPER_RUUD: 1.26,
ERNESTS_GULBIS: 3.80
}
},
{
'round': 128,
'players': [
ALEXEI_POPYRIN,
UGO_HUMBERT
],
'score': [(3, 6), (6, 3), (7, 6), (6, 3)],
'odds': {
ALEXEI_POPYRIN: 2.00,
UGO_HUMBERT: 1.77
}
},
{
'round': 128,
'players': [
OSCAR_OTTE,
MALEK_JAZIRI,
],
'score': [(6, 3), (6, 1), (4, 6), (6, 0)],
'odds': {
OSCAR_OTTE: 1.75,
MALEK_JAZIRI: 2.00
}
},
{
'round': 128,
'players': [
LEONARDO_MAYER,
JIRI_VESELY
],
'score': [(7, 6), (6, 3), (6, 0)],
'odds': {
LEONARDO_MAYER: 1.63,
JIRI_VESELY: 2.33
}
},
{
'round': 128,
'players': [
PHILIPP_KOHLSCHREIBER,
ROBIN_HAASE,
],
'score': [(6, 4), (6, 4), (6, 7), (6, 1)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.22,
ROBIN_HAASE: 4.25
}
},
{
'round': 128,
'players': [
GRIGOR_DIMITROV,
JANKO_TIPSAREVIC,
],
'score': [(6, 3), (6, 0), (3, 6), (6, 7), (6, 4)],
'odds': {
GRIGOR_DIMITROV: 1.17,
JANKO_TIPSAREVIC: 5.21
}
},
{
'round': 128,
'players': [
HUGO_DELLIEN,
PRAJNESH_GUNNESWARAN
],
'score': [(6, 1), (6, 3), (6, 1)],
'odds': {
HUGO_DELLIEN: 1.20,
PRAJNESH_GUNNESWARAN: 4.57
}
},
{
'round': 128,
'players': [
LASLO_DJERE,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (6, 2), (7, 6)],
'odds': {
LASLO_DJERE: 1.74,
ALBERT_RAMOS_VINOLAS: 2.13
}
},
{
'round': 128,
'players': [
MATTEO_BERRETTINI,
PABLO_ANDUJAR
],
'score': [(6, 7), (6, 4), (6, 4), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 1.19,
PABLO_ANDUJAR: 5.00
}
},
{
'round': 128,
'players': [
DAVID_GOFFIN,
RICARDAS_BERANKIS
],
'score': [(6, 0), (6, 2), (6, 2)],
'odds': {
DAVID_GOFFIN: 1.07,
RICARDAS_BERANKIS: 9.37
}
},
{
'round': 128,
'players': [
DIEGO_SCHWARTZMAN,
MARTON_FUCSOVICS
],
'score': [(6, 3), (3, 6), (7, 6), (2, 6), (6, 2)],
'odds': {
DIEGO_SCHWARTZMAN: 1.31,
MARTON_FUCSOVICS: 3.30
}
},
{
'round': 128,
'players': [
NICOLAS_MAHUT,
MARCO_CECCHINATO
],
'score': [(2, 6), (6, 7), (6, 4), (6, 2), (6, 4)],
'odds': {
NICOLAS_MAHUT: 8.00,
MARCO_CECCHINATO: 1.09
}
},
{
'round': 128,
'players': [
MARIN_CILIC,
THOMAS_FABBIANO
],
'score': [(6, 3), (7, 5), (6, 1)],
'odds': {
MARIN_CILIC: 1.08,
THOMAS_FABBIANO: 8.11
}
},
{
'round': 128,
'players': [
KEI_NISHIKORI,
QUENTIN_HALYS
],
'score': [(6, 2), (6, 3), (6, 4)],
'odds': {
KEI_NISHIKORI: 1.15,
QUENTIN_HALYS: 5.65
}
},
{
'round': 128,
'players': [
STEFANOS_TSITSIPAS,
MAXIMILIAN_MARTERER,
],
'score': [(6, 2), (6, 2), (7, 6)],
'odds': {
STEFANOS_TSITSIPAS: 1.02,
MAXIMILIAN_MARTERER: 17.73
}
},
{
'round': 128,
'players': [
ROGER_FEDERER,
LORENZO_SONEGO
],
'score': [(6, 2), (6, 4), (6, 4)],
'odds': {
ROGER_FEDERER: 1.10,
LORENZO_SONEGO: 7.00
}
},
{
'round': 128,
'players': [
JO_WILFRIED_TSONGA,
PETER_GOJOWCZYK
],
'score': [(7, 6), (6, 1), (4, 6), (6, 3)],
'odds': {
JO_WILFRIED_TSONGA: 1.22,
PETER_GOJOWCZYK: 4.70
}
},
{
'round': 128,
'players': [
BENOIT_PAIRE,
MARIUS_COPIL
],
'score': [(6, 4), (6, 7), (6, 0), (6, 1)],
'odds': {
BENOIT_PAIRE: 1.18,
MARIUS_COPIL: 5.01
}
},
{
'round': 128,
'players': [
CORENTIN_MOUTET,
ALEXEY_VATUTIN,
],
'score': [(6, 4), (7, 6), (6, 4)],
'odds': {
CORENTIN_MOUTET: 1.51,
ALEXEY_VATUTIN: 2.65
}
},
{
'round': 128,
'players': [
YANNICK_MADEN,
KIMMER_COPPEJANS,
],
'score': [(7, 6), (7, 5), (6, 3)],
'odds': {
YANNICK_MADEN: 1.45,
KIMMER_COPPEJANS: 2.75
}
},
{
'round': 128,
'players': [
HENRI_LAAKSONEN,
PEDRO_MARTINEZ
],
'score': [(6, 1), (6, 0), (7, 6)],
'odds': {
HENRI_LAAKSONEN: 1.83,
PEDRO_MARTINEZ: 1.91
}
},
{
'round': 128,
'players': [
MIOMIR_KECMANOVIC,
DENIS_KUDLA
],
'score': [(6, 0), (6, 7), (5, 7), (6, 3), (6, 4)],
'odds': {
MIOMIR_KECMANOVIC: 1.44,
DENIS_KUDLA: 2.80
}
},
{
'round': 128,
'players': [
LLOYD_HARRIS,
LUKAS_ROSOL
],
'score': [(6, 1), (4, 6), (2, 6), (6, 1), (6, 2)],
'odds': {
LLOYD_HARRIS: 1.61,
LUKAS_ROSOL: 2.41
}
},
{
'round': 128,
'players': [
RICHARD_GASQUET,
MISCHA_ZVEREV
],
'score': [(6, 3), (6, 4), (6, 3)],
'odds': {
RICHARD_GASQUET: 1.11,
MISCHA_ZVEREV: 6.27
}
},
{
'round': 128,
'players': [
CHRISTIAN_GARIN,
REILLY_OPELKA
],
'score': [(7, 6), (7, 5), (7, 6)],
'odds': {
CHRISTIAN_GARIN: 1.37,
REILLY_OPELKA: 3.20
}
},
{
'round': 128,
'players': [
PABLO_CUEVAS,
MAXIME_JANVIER
],
'score': [(6, 4), (6, 4), (6, 2)],
'odds': {
PABLO_CUEVAS: 1.17,
MAXIME_JANVIER: 5.47
}
},
{
'round': 128,
'players': [
SALVATORE_CARUSO,
JAUME_MUNAR
],
'score': [(7, 5), (4, 6), (6, 3), (6, 3)],
'odds': {
SALVATORE_CARUSO: 4.00,
JAUME_MUNAR: 1.25
}
},
{
'round': 128,
'players': [
PABLO_CARRENO_BUSTA,
JOAO_SOUSA
],
'score': [(6, 3), (6, 1), (6, 2)],
'odds': {
PABLO_CARRENO_BUSTA: 1.78,
JOAO_SOUSA: 1.91
}
},
{
'round': 128,
'players': [
ROBERTO_CARBALLES_BAENA,
ALEXANDRE_MULLER
],
'score': [(6, 4), (6, 4), (6, 2)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.18,
ALEXANDRE_MULLER: 5.15
}
},
{
'round': 128,
'players': [
ALEXANDER_BUBLIK,
RUDOLF_MOLLEKER
],
'score': [(7, 5), (6, 7), (6, 1), (7, 6)],
'odds': {
ALEXANDER_BUBLIK: 3.18,
RUDOLF_MOLLEKER: 1.38
}
},
{
'round': 128,
'players': [
RADU_ALBOT,
TENNYS_SANDGREN
],
'score': [(7, 6), (7, 6), (3, 6), (6, 1)],
'odds': {
RADU_ALBOT: 1.31,
TENNYS_SANDGREN: 3.32
}
},
{
'round': 128,
'players': [
FILIP_KRAJINOVIC,
FRANCES_TIAFOE
],
'score': [(6, 2), (4, 6), (6, 3), (3, 6), (6, 0)],
'odds': {
FILIP_KRAJINOVIC: 1.42,
FRANCES_TIAFOE: 2.88
}
},
{
'round': 128,
'players': [
GILLES_SIMON,
SERGIY_STAKHOVSKY
],
'score': [(6, 3), (6, 3), (6, 4)],
'odds': {
GILLES_SIMON: 1.14,
SERGIY_STAKHOVSKY: 6.06
}
},
{
'round': 128,
'players': [
STAN_WAWRINKA,
JOZEF_KOVALIK
],
'score': [(6, 1), (6, 7), (6, 2), (6, 3)],
'odds': {
STAN_WAWRINKA: 1.09,
JOZEF_KOVALIK: 7.14
}
},
{
'round': 128,
'players': [
ALEX_DE_MINAUR,
BRADLEY_KLAHN
],
'score': [(6, 1), (6, 4), (6, 4)],
'odds': {
ALEX_DE_MINAUR: 1.17,
BRADLEY_KLAHN: 5.47
}
},
{
'round': 128,
'players': [
JAN_LENNARD_STRUFF,
DENIS_SHAPOVALOV
],
'score': [(7, 6), (6, 3), (6, 4)],
'odds': {
JAN_LENNARD_STRUFF: 1.50,
DENIS_SHAPOVALOV: 2.68
}
},
{
'round': 128,
'players': [
GUIDO_PELLA,
GUIDO_ANDREOZZI
],
'score': [(7, 6), (6, 4), (1, 6), (6, 1)],
'odds': {
GUIDO_PELLA: 1.24,
GUIDO_ANDREOZZI: 4.25
}
},
{
'round': 128,
'players': [
JUAN_IGNACIO_LONDERO,
NIKOLOZ_BASILASHVILI
],
'score': [(6, 4), (6, 1), (6, 3)],
'odds': {
JUAN_IGNACIO_LONDERO: 2.52,
NIKOLOZ_BASILASHVILI: 1.48
}
},
{
'round': 128,
'players': [
BORNA_CORIC,
ALJAZ_BEDENE
],
'score': [(6, 1), (6, 7), (6, 4), (6, 4)],
'odds': {
BORNA_CORIC: 1.18,
ALJAZ_BEDENE: 4.67
}
},
{
'round': 128,
'players': [
PIERRE_HUGUES_HERBERT,
DANIIL_MEDVEDEV
],
'score': [(4, 6), (4, 6), (6, 3), (6, 2), (7, 5)],
'odds': {
PIERRE_HUGUES_HERBERT: 3.43,
DANIIL_MEDVEDEV: 1.33
}
},
{
'round': 128,
'players': [
DOMINIC_THIEM,
TOMMY_PAUL
],
'score': [(6, 4), (4, 6), (7, 6), (6, 2)],
'odds': {
DOMINIC_THIEM: 1.02,
TOMMY_PAUL: 18.13
}
},
{
'round': 128,
'players': [
RAFAEL_NADAL,
YANNICK_HANFMANN
],
'score': [(6, 2), (6, 1), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.00,
YANNICK_HANFMANN: 22.23
}
},
{
'round': 128,
'players': [
NOVAK_DJOKOVIC,
HUBERT_HURKACZ
],
'score': [(6, 4), (6, 2), (6, 2)],
'odds': {
NOVAK_DJOKOVIC: 1.03,
HUBERT_HURKACZ: 13.84
}
},
{
'round': 128,
'players': [
MIKAEL_YMER,
BLAZ_ROLA
],
'score': [(6, 0), (6, 3), (7, 6)],
'odds': {
MIKAEL_YMER: 1.65,
BLAZ_ROLA: 2.20
}
},
{
'round': 128,
'players': [
JORDAN_THOMPSON,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(6, 3), (6, 2), (7, 6)],
'odds': {
JORDAN_THOMPSON: 2.75,
ALEJANDRO_DAVIDOVICH_FOKINA: 1.48
}
},
{
'round': 128,
'players': [
YOSHIHITO_NISHIOKA,
MACKENZIE_MCDONALD
],
'score': [(6, 7), (6, 0), (4, 6), (6, 2), (6, 3)],
'odds': {
YOSHIHITO_NISHIOKA: 1.32,
MACKENZIE_MCDONALD: 2.95
}
},
{
'round': 128,
'players': [
ADRIAN_MANNARINO,
STEFANO_TRAVAGLIA
],
'score': [(6, 7), (6, 3), (3, 6), (6, 2), (6, 2)],
'odds': {
ADRIAN_MANNARINO: 2.70,
STEFANO_TRAVAGLIA: 1.46
}
},
{
'round': 128,
'players': [
MARTIN_KLIZAN,
MIKHAIL_KUKUSHKIN
],
'score': [(3, 6), (5, 7), (6, 4), (6, 2), (6, 3)],
'odds': {
MARTIN_KLIZAN: 1.56,
MIKHAIL_KUKUSHKIN: 2.45
}
},
{
'round': 128,
'players': [
IVO_KARLOVIC,
FELICIANO_LOPEZ
],
'score': [(7, 6), (7, 5), (6, 7), (7, 5)],
'odds': {
IVO_KARLOVIC: 2.63,
FELICIANO_LOPEZ: 1.45
}
},
{
'round': 128,
'players': [
ANTOINE_HOANG,
DAMIR_DZUMHUR
],
'score': [(6, 4), (0, 6), (7, 6), (6, 3)],
'odds': {
ANTOINE_HOANG: 3.45,
DAMIR_DZUMHUR: 1.32
}
},
{
'round': 128,
'players': [
TAYLOR_FRITZ,
BERNARD_TOMIC
],
'score': [(6, 1), (6, 4), (6, 1)],
'odds': {
TAYLOR_FRITZ: 1.12,
BERNARD_TOMIC: 5.77
}
},
{
'round': 128,
'players': [
FEDERICO_DELBONIS,
GUILLERMO_GARCIA_LOPEZ
],
'score': [(6, 1), (3, 6), (6, 3), (6, 2)],
'odds': {
FEDERICO_DELBONIS: 1.30,
GUILLERMO_GARCIA_LOPEZ: 3.45
}
},
{
'round': 128,
'players': [
ELLIOT_BENCHETRIT,
CAMERON_NORRIE
],
'score': [(6, 3), (6, 0), (6, 2)],
'odds': {
ELLIOT_BENCHETRIT: 2.24,
CAMERON_NORRIE: 1.61
}
},
{
'round': 128,
'players': [
GREGOIRE_BARRERE,
MATTHEW_EBDEN
],
'score': [(6, 3), (5, 7), (7, 5), (6, 1)],
'odds': {
GREGOIRE_BARRERE: 1.24,
MATTHEW_EBDEN: 4.00
}
},
{
'round': 128,
'players': [
DUSAN_LAJOVIC,
THIAGO_MONTEIRO
],
'score': [(6, 3), (6, 4), (6, 4)],
'odds': {
DUSAN_LAJOVIC: 1.45,
THIAGO_MONTEIRO: 2.63
}
},
{
'round': 128,
'players': [
KYLE_EDMUND,
JEREMY_CHARDY
],
'score': [(7, 6), (5, 7), (6, 4), (4, 6), (7, 5)]
},
{
'round': 128,
'players': [
FERNANDO_VERDASCO,
DANIEL_EVANS
],
'score': [(6, 3), (6, 7), (6, 3), (6, 2)],
'odds': {
FERNANDO_VERDASCO: 1.22,
DANIEL_EVANS: 3.34
}
},
{
'round': 128,
'players': [
LUCAS_POUILLE,
SIMONE_BOLELLI
],
'score': [(6, 3), (6, 4), (7, 5)],
'odds': {
LUCAS_POUILLE: 1.43,
SIMONE_BOLELLI: 2.95
}
},
{
'round': 128,
'players': [
ROBERTO_BAUTISTA_AGUT,
STEVE_JOHNSON
],
'score': [(6, 3), (6, 4), (6, 2)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.19,
STEVE_JOHNSON: 4.60
}
},
{
'round': 128,
'players': [
GAEL_MONFILS,
TARO_DANIEL
],
'score': [(6, 0), (6, 4), (6, 1)],
'odds': {
GAEL_MONFILS: 1.11,
TARO_DANIEL: 6.00
}
},
{
'round': 128,
'players': [
KAREN_KHACHANOV,
CEDRIC_MARCEL_STEBE
],
'score': [(6, 1), (6, 1), (6, 4)],
'odds': {
KAREN_KHACHANOV: 1.02,
CEDRIC_MARCEL_STEBE: 13.00
}
},
{
'round': 128,
'players': [
FABIO_FOGNINI,
ANDREAS_SEPPI
],
'score': [(6, 3), (6, 0), (3, 6), (6, 3)],
'odds': {
FABIO_FOGNINI: 1.14,
ANDREAS_SEPPI: 5.50
}
},
{
'round': 128,
'players': [
JUAN_MARTIN_DEL_POTRO,
NICOLAS_JARRY
],
'score': [(3, 6), (6, 2), (6, 1), (6, 4)],
'odds': {
JUAN_MARTIN_DEL_POTRO: 1.25,
NICOLAS_JARRY: 4.10
}
},
{
'round': 128,
'players': [
ALEXANDER_ZVEREV,
JOHN_MILLMAN
],
'score': [(7, 6), (6, 3), (2, 6), (6, 7), (6, 3)],
'odds': {
ALEXANDER_ZVEREV: 1.11,
JOHN_MILLMAN: 7.01
}
},
{
'round': 64,
'players': [
BENOIT_PAIRE,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 2), (6, 2), (5, 7), (6, 7), (11, 9)],
'odds': {
BENOIT_PAIRE: 1.42,
PIERRE_HUGUES_HERBERT: 2.73
}
},
{
'round': 64,
'players': [
NICOLAS_MAHUT,
PHILIPP_KOHLSCHREIBER
],
'score': [(6, 3), (6, 3), (6, 3)],
'odds': {
NICOLAS_MAHUT: 6.62,
PHILIPP_KOHLSCHREIBER: 1.10
}
},
{
'round': 64,
'players': [
JUAN_IGNACIO_LONDERO,
RICHARD_GASQUET
],
'score': [(6, 2), (3, 6), (6, 3), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.91,
RICHARD_GASQUET: 1.87
}
},
{
'round': 64,
'players': [
FILIP_KRAJINOVIC,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (6, 4), (6, 7), (3, 6), (8, 6)],
'odds': {
FILIP_KRAJINOVIC: 1.44,
ROBERTO_CARBALLES_BAENA: 2.85
}
},
{
'round': 64,
'players': [
LASLO_DJERE,
ALEXEI_POPYRIN
],
'score': [(6, 4), (7, 6), (6, 4)],
'odds': {
LASLO_DJERE: 1.24,
ALEXEI_POPYRIN: 4.00
}
},
{
'round': 64,
'players': [
CASPER_RUUD,
MATTEO_BERRETTINI
],
'score': [(6, 4), (7, 5), (6, 3)],
'odds': {
CASPER_RUUD: 2.55,
MATTEO_BERRETTINI: 1.51
}
},
{
'round': 64,
'players': [
DAVID_GOFFIN,
MIOMIR_KECMANOVIC
],
'score': [(6, 2), (6, 4), (6, 3)],
'odds': {
DAVID_GOFFIN: 1.11,
MIOMIR_KECMANOVIC: 6.25
}
},
{
'round': 64,
'players': [
STAN_WAWRINKA,
CHRISTIAN_GARIN
],
'score': [(6, 1), (6, 4), (6, 0)],
'odds': {
STAN_WAWRINKA: 1.53,
CHRISTIAN_GARIN: 2.50
}
},
{
'round': 64,
'players': [
PABLO_CARRENO_BUSTA,
ALEX_DE_MINAUR
],
'score': [(6, 3), (6, 1), (6, 1)],
'odds': {
PABLO_CARRENO_BUSTA: 1.43,
ALEX_DE_MINAUR: 2.84
}
},
{
'round': 64,
'players': [
CORENTIN_MOUTET,
GUIDO_PELLA
],
'score': [(6, 3), (6, 1), (2, 6), (7, 5)],
'odds': {
CORENTIN_MOUTET: 4.05,
GUIDO_PELLA: 1.22
}
},
{
'round': 64,
'players': [
GRIGOR_DIMITROV,
MARIN_CILIC
],
'score': [(6, 7), (6, 4), (4, 6), (7, 6), (6, 3)],
'odds': {
GRIGOR_DIMITROV: 2.45,
MARIN_CILIC: 1.54
}
},
{
'round': 64,
'players': [
KEI_NISHIKORI,
JO_WILFRIED_TSONGA
],
'score': [(4, 6), (6, 4), (6, 4), (6, 4)],
'odds': {
KEI_NISHIKORI: 1.32,
JO_WILFRIED_TSONGA: 3.25
}
},
{
'round': 64,
'players': [
STEFANOS_TSITSIPAS,
HUGO_DELLIEN
],
'score': [(4, 6), (6, 0), (6, 3), (7, 5)],
'odds': {
STEFANOS_TSITSIPAS: 1.07,
HUGO_DELLIEN: 7.50
}
},
{
'round': 64,
'players': [
ROGER_FEDERER,
OSCAR_OTTE
],
'score': [(6, 4), (6, 3), (6, 4)],
'odds': {
ROGER_FEDERER: 1.02,
OSCAR_OTTE: 16.78
}
},
{
'round': 64,
'players': [
RAFAEL_NADAL,
YANNICK_MADEN
],
'score': [(6, 1), (6, 2), (6, 4)],
'odds': {
RAFAEL_NADAL: 1.01,
YANNICK_MADEN: 22.00
}
},
# 2019-05-30
{
'round': 64,
'players': [
JORDAN_THOMPSON,
IVO_KARLOVIC
],
'score': [(6, 3), (6, 4), (6, 7), (6, 3)],
'odds': {
JORDAN_THOMPSON: 1.47,
IVO_KARLOVIC: 2.73
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
RADU_ALBOT
],
'score': [(7, 6), (7, 6), (6, 7), (6, 2)],
'odds': {
JAN_LENNARD_STRUFF: 1.37,
RADU_ALBOT: 3.09
}
},
{
'round': 64,
'players': [
DUSAN_LAJOVIC,
ELLIOT_BENCHETRIT
],
'score': [(6, 3), (6, 3), (6, 3)],
'odds': {
DUSAN_LAJOVIC: 1.31,
ELLIOT_BENCHETRIT: 3.55
}
},
{
'round': 64,
'players': [
PABLO_CUEVAS,
KYLE_EDMUND
],
'score': [(7, 6), (6, 3), (2, 1)],
'retired': True,
'odds': {
PABLO_CUEVAS: 1.61,
KYLE_EDMUND: 2.20
}
},
{
'round': 64,
'players': [
SALVATORE_CARUSO,
GILLES_SIMON
],
'score': [(6, 1), (6, 2), (6, 4)],
'odds': {
SALVATORE_CARUSO: 3.24,
GILLES_SIMON: 1.33
}
},
{
'round': 64,
'players': [
ANTOINE_HOANG,
FERNANDO_VERDASCO
],
'score': [(6, 4), (3, 6), (7, 6), (7, 5)],
'odds': {
ANTOINE_HOANG: 5.75,
FERNANDO_VERDASCO: 1.13
}
},
{
'round': 64,
'players': [
ROBERTO_BAUTISTA_AGUT,
TAYLOR_FRITZ
],
'score': [(6, 2), (6, 3), (6, 2)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.44,
TAYLOR_FRITZ: 2.68
}
},
{
'round': 64,
'players': [
LEONARDO_MAYER,
DIEGO_SCHWARTZMAN
],
'score': [(4, 6), (6, 3), (6, 4), (7, 5)]
},
{
'round': 64,
'players': [
GAEL_MONFILS,
ADRIAN_MANNARINO
],
'score': [(6, 3), (6, 4), (6, 4)],
'odds': {
GAEL_MONFILS: 1.09,
ADRIAN_MANNARINO: 7.00
}
},
{
'round': 64,
'players': [
BORNA_CORIC,
LLOYD_HARRIS
],
'score': [(6, 2), (6, 3), (7, 6)],
'odds': {
BORNA_CORIC: 1.06,
LLOYD_HARRIS: 9.50
}
},
{
'round': 64,
'players': [
KAREN_KHACHANOV,
GREGOIRE_BARRERE
],
'score': [(6, 3), (7, 6), (0, 6), (7, 5)],
'odds': {
KAREN_KHACHANOV: 1.10,
GREGOIRE_BARRERE: 6.75
}
},
{
'round': 64,
'players': [
FABIO_FOGNINI,
FEDERICO_DELBONIS
],
'score': [(6, 4), (3, 6), (6, 3), (6, 3)],
'odds': {
FABIO_FOGNINI: 1.45,
FEDERICO_DELBONIS: 2.70
}
},
{
'round': 64,
'players': [
JUAN_MARTIN_DEL_POTRO,
YOSHIHITO_NISHIOKA
],
'score': [(5, 7), (6, 4), (6, 2), (6, 7), (6, 2)],
'odds': {
JUAN_MARTIN_DEL_POTRO: 1.07,
YOSHIHITO_NISHIOKA: 7.50
}
},
{
'round': 64,
'players': [
ALEXANDER_ZVEREV,
MIKAEL_YMER
],
'score': [(6, 1), (6, 3), (7, 6)],
'odds': {
ALEXANDER_ZVEREV: 1.13,
MIKAEL_YMER: 6.50
}
},
{
'round': 64,
'players': [
DOMINIC_THIEM,
ALEXANDER_BUBLIK
],
'score': [(6, 3), (6, 7), (6, 3), (7, 5)],
'odds': {
DOMINIC_THIEM: 1.01,
ALEXANDER_BUBLIK: 19.00
}
},
{
'round': 64,
'players': [
NOVAK_DJOKOVIC,
HENRI_LAAKSONEN
],
'score': [(6, 1), (6, 4), (6, 3)],
'odds': {
NOVAK_DJOKOVIC: 1.01,
HENRI_LAAKSONEN: 22.74
}
},
# 2019-05-31
{
'round': 32,
'players': [
BENOIT_PAIRE,
PABLO_CARRENO_BUSTA
],
'score': [(6, 2), (4, 6), (7, 6)],
'retired': True,
'odds': {
BENOIT_PAIRE: 2.22,
PABLO_CARRENO_BUSTA: 1.56
}
},
{
'round': 32,
'players': [
LEONARDO_MAYER,
NICOLAS_MAHUT
],
'score': [(3, 6), (7, 6), (6, 4), (7, 6)],
'odds': {
LEONARDO_MAYER: 1.24,
NICOLAS_MAHUT: 3.75
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
CORENTIN_MOUTET
],
'score': [(2, 6), (6, 3), (6, 4), (5, 7), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.56,
CORENTIN_MOUTET: 2.36
}
},
{
'round': 32,
'players': [
KEI_NISHIKORI,
LASLO_DJERE
],
'score': [(6, 4), (6, 7), (6, 3), (4, 6), (8, 6)],
'odds': {
KEI_NISHIKORI: 1.31,
LASLO_DJERE: 3.63
}
},
{
'round': 32,
'players': [
ROGER_FEDERER,
CASPER_RUUD
],
'score': [(6, 3), (6, 1), (7, 6)],
'odds': {
ROGER_FEDERER: 1.12,
CASPER_RUUD: 6.00
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
DAVID_GOFFIN
],
'score': [(6, 1), (6, 3), (4, 6), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.02,
DAVID_GOFFIN: 13.32
}
},
# 2019-06-01
{
'round': 32,
'players': [
STAN_WAWRINKA,
GRIGOR_DIMITROV
],
'score': [(7, 6), (7, 6), (7, 6)],
# no odds
},
{
'round': 32,
'players': [
GAEL_MONFILS,
ANTOINE_HOANG
],
'score': [(6, 3), (6, 2), (6, 3)],
'odds': {
GAEL_MONFILS: 1.07,
ANTOINE_HOANG: 7.50
}
},
{
'round': 32,
'players': [
JAN_LENNARD_STRUFF,
BORNA_CORIC
],
'score': [(4, 6), (6, 1), (4, 6), (7, 6), (11, 9)],
'odds': {
JAN_LENNARD_STRUFF: 2.92,
BORNA_CORIC: 1.40
}
},
{
'round': 32,
'players': [
KAREN_KHACHANOV,
MARTIN_KLIZAN
],
'score': [(6, 1), (6, 4), (6, 3)],
'odds': {
KAREN_KHACHANOV: 1.28,
MARTIN_KLIZAN: 3.60
}
},
{
'round': 32,
'players': [
FABIO_FOGNINI,
ROBERTO_BAUTISTA_AGUT
],
'score': [(7, 6), (6, 4), (4, 6), (6, 1)],
'odds': {
FABIO_FOGNINI: 1.67,
ROBERTO_BAUTISTA_AGUT: 2.19
}
},
{
'round': 32,
'players': [
JUAN_MARTIN_DEL_POTRO,
JORDAN_THOMPSON
],
'score': [(6, 4), (6, 4), (6, 0)],
'odds': {
JUAN_MARTIN_DEL_POTRO: 1.27,
JORDAN_THOMPSON: 3.75
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
FILIP_KRAJINOVIC
],
'score': [(7, 5), (6, 3), (6, 7), (7, 6)],
# no odds
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
DUSAN_LAJOVIC
],
'score': [(6, 4), (6, 2), (4, 6), (1, 6), (6, 2)],
'odds': {
ALEXANDER_ZVEREV: 1.31,
DUSAN_LAJOVIC: 3.63
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
PABLO_CUEVAS
],
'score': [(6, 3), (4, 6), (6, 2), (7, 5)],
'odds': {
DOMINIC_THIEM: 1.21,
PABLO_CUEVAS: 4.20
}
},
{
'round': 32,
'players': [
NOVAK_DJOKOVIC,
SALVATORE_CARUSO
],
'score': [(6, 3), (6, 3), (6, 2)],
'odds': {
NOVAK_DJOKOVIC: 1.01,
SALVATORE_CARUSO: 20.86
}
},
# 2019-06-02
{
'round': 16,
'players': [
STAN_WAWRINKA,
STEFANOS_TSITSIPAS
],
'score': [(7, 6), (5, 7), (6, 4), (3, 6), (8, 6)],
'odds': {
STAN_WAWRINKA: 2.10,
STEFANOS_TSITSIPAS: 1.71
}
},
{
'round': 16,
'players': [
ROGER_FEDERER,
LEONARDO_MAYER
],
'score': [(6, 2), (6, 3), (6, 3)],
'odds': {
ROGER_FEDERER: 1.07,
LEONARDO_MAYER: 8.00
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
JUAN_IGNACIO_LONDERO
],
'score': [(6, 2), (6, 3), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.01,
JUAN_IGNACIO_LONDERO: 23.00
}
},
# 2019-06-03
{
'round': 16,
'players': [
KAREN_KHACHANOV,
JUAN_MARTIN_DEL_POTRO
],
'score': [(7, 5), (6, 3), (3, 6), (6, 3)],
'odds': {
KAREN_KHACHANOV: 2.75,
JUAN_MARTIN_DEL_POTRO: 1.41
}
},
{
'round': 16,
'players': [
KEI_NISHIKORI,
BENOIT_PAIRE
],
'score': [(6, 2), (6, 7), (6, 2), (6, 7), (6, 2)],
# no odds
},
{
'round': 16,
'players': [
ALEXANDER_ZVEREV,
FABIO_FOGNINI
],
'score': [(3, 6), (6, 2), (6, 2), (7, 6)],
'odds': {
ALEXANDER_ZVEREV: 2.06,
FABIO_FOGNINI: 1.74
}
},
{
'round': 16,
'players': [
DOMINIC_THIEM,
GAEL_MONFILS
],
'score': [(6, 4), (6, 4), (6, 2)],
'odds': {
DOMINIC_THIEM: 1.36,
GAEL_MONFILS: 3.15
}
},
{
'round': 16,
'players': [
NOVAK_DJOKOVIC,
JAN_LENNARD_STRUFF
],
'score': [(6, 3), (6, 2), (6, 2)],
'odds': {
NOVAK_DJOKOVIC: 1.04,
JAN_LENNARD_STRUFF: 13.00
}
},
# 2019-06-04
{
'round': 8,
'players': [
ROGER_FEDERER,
STAN_WAWRINKA
],
'score': [(7, 6), (4, 6), (7, 6), (6, 4)],
'odds': {
ROGER_FEDERER: 1.22,
STAN_WAWRINKA: 3.05
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
KEI_NISHIKORI
],
'score': [(6, 1), (6, 1), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.02,
KEI_NISHIKORI: 14.75
}
},
# 2019-06-06
{
'round': 8,
'players': [
DOMINIC_THIEM,
KAREN_KHACHANOV
],
'score': [(6, 2), (6, 4), (6, 2)],
'odds': {
DOMINIC_THIEM: 1.27,
KAREN_KHACHANOV: 3.75
}
},
{
'round': 8,
'players': [
NOVAK_DJOKOVIC,
ALEXANDER_ZVEREV
],
'score': [(7, 5), (6, 2), (6, 2)],
'odds': {
NOVAK_DJOKOVIC: 1.13,
ALEXANDER_ZVEREV: 7.07
}
},
# 2019-06-07
{
'round': 4,
'players': [
RAFAEL_NADAL,
ROGER_FEDERER
],
'score': [(6, 3), (6, 4), (6, 2)],
'odds': {
RAFAEL_NADAL: 1.13,
ROGER_FEDERER: 6.00
}
},
# 2019-06-08
{
'round': 4,
'players': [
DOMINIC_THIEM,
NOVAK_DJOKOVIC
],
'score': [(6, 2), (3, 6), (7, 5), (5, 7), (7, 5)],
'odds': {
DOMINIC_THIEM: 1.20,
NOVAK_DJOKOVIC: 2.05
}
},
# 2019-06-09
{
'round': 2,
'players': [
RAFAEL_NADAL,
DOMINIC_THIEM
],
'score': [(6, 3), (5, 7), (6, 1), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.19,
DOMINIC_THIEM: 4.75
}
}
]
}
]
| 29.168201 | 67 | 0.261831 |
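A minimal sketch of how records shaped like DATA_2019_05 can be consumed: each tournament dict carries 'matches', and a match may carry decimal 'odds' keyed by player. The string player names below are hypothetical stand-ins for the constants imported from men.py, and the assumption that the first-listed player won the match is inferred from the scores above.

sample = [{
    'location': 'MADRID',
    'date': '2019-05-05',
    'matches': [
        {'round': 2,
         'players': ['PLAYER_A', 'PLAYER_B'],
         'score': [(6, 3), (6, 4)],
         'odds': {'PLAYER_A': 1.43, 'PLAYER_B': 2.80}},
    ],
}]

for tournament in sample:
    for match in tournament['matches']:
        winner = match['players'][0]          # first-listed player won the match
        odds = match.get('odds')
        if odds:
            implied = 1.0 / odds[winner]      # bookmaker's implied win probability
            print(tournament['location'], match['round'], winner, round(implied, 3))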
5ea685d51551aeb72095bc8a466ad5c6ce50f6cb | 6543 | py | Python | sdk/python/pulumi_azure_native/network/v20160330/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/network/v20160330/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/network/v20160330/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ApplicationGatewayCookieBasedAffinity',
'ApplicationGatewayProtocol',
'ApplicationGatewayRequestRoutingRuleType',
'ApplicationGatewaySkuName',
'ApplicationGatewayTier',
'AuthorizationUseStatus',
'ExpressRouteCircuitPeeringAdvertisedPublicPrefixState',
'ExpressRouteCircuitPeeringState',
'ExpressRouteCircuitPeeringType',
'ExpressRouteCircuitSkuFamily',
'ExpressRouteCircuitSkuTier',
'IPAllocationMethod',
'IPVersion',
'LoadDistribution',
'ProbeProtocol',
'RouteNextHopType',
'SecurityRuleAccess',
'SecurityRuleDirection',
'SecurityRuleProtocol',
'ServiceProviderProvisioningState',
'TransportProtocol',
'VirtualNetworkGatewayConnectionStatus',
'VirtualNetworkGatewayConnectionType',
'VirtualNetworkGatewaySkuName',
'VirtualNetworkGatewaySkuTier',
'VirtualNetworkGatewayType',
'VpnType',
]
class ApplicationGatewayCookieBasedAffinity(str, Enum):
"""
Gets or sets the cookie affinity
"""
ENABLED = "Enabled"
DISABLED = "Disabled"
class ApplicationGatewayProtocol(str, Enum):
"""
Gets or sets the protocol
"""
HTTP = "Http"
HTTPS = "Https"
class ApplicationGatewayRequestRoutingRuleType(str, Enum):
"""
Gets or sets the rule type
"""
BASIC = "Basic"
PATH_BASED_ROUTING = "PathBasedRouting"
class ApplicationGatewaySkuName(str, Enum):
"""
Gets or sets name of application gateway SKU
"""
STANDARD_SMALL = "Standard_Small"
STANDARD_MEDIUM = "Standard_Medium"
STANDARD_LARGE = "Standard_Large"
class ApplicationGatewayTier(str, Enum):
"""
Gets or sets tier of application gateway
"""
STANDARD = "Standard"
class AuthorizationUseStatus(str, Enum):
"""
Gets or sets AuthorizationUseStatus
"""
AVAILABLE = "Available"
IN_USE = "InUse"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(str, Enum):
"""
Gets or sets AdvertisedPublicPrefixState of the Peering resource
"""
NOT_CONFIGURED = "NotConfigured"
CONFIGURING = "Configuring"
CONFIGURED = "Configured"
VALIDATION_NEEDED = "ValidationNeeded"
class ExpressRouteCircuitPeeringState(str, Enum):
"""
Gets or sets state of Peering
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class ExpressRouteCircuitPeeringType(str, Enum):
"""
Gets or sets PeeringType
"""
AZURE_PUBLIC_PEERING = "AzurePublicPeering"
AZURE_PRIVATE_PEERING = "AzurePrivatePeering"
MICROSOFT_PEERING = "MicrosoftPeering"
class ExpressRouteCircuitSkuFamily(str, Enum):
"""
Gets or sets family of the sku.
"""
UNLIMITED_DATA = "UnlimitedData"
METERED_DATA = "MeteredData"
class ExpressRouteCircuitSkuTier(str, Enum):
"""
Gets or sets tier of the sku.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
class IPAllocationMethod(str, Enum):
"""
Gets or sets PrivateIP allocation method (Static/Dynamic)
"""
STATIC = "Static"
DYNAMIC = "Dynamic"
class IPVersion(str, Enum):
"""
Gets or sets PrivateIP address version (IPv4/IPv6)
"""
I_PV4 = "IPv4"
I_PV6 = "IPv6"
class LoadDistribution(str, Enum):
"""
Gets or sets the load distribution policy for this rule
"""
DEFAULT = "Default"
SOURCE_IP = "SourceIP"
SOURCE_IP_PROTOCOL = "SourceIPProtocol"
class ProbeProtocol(str, Enum):
"""
    Gets or sets the protocol of the end point. Possible values are Http or Tcp. If Tcp is specified, a received ACK is required for the probe to be successful. If Http is specified, a 200 OK response from the specified URI is required for the probe to be successful
"""
HTTP = "Http"
TCP = "Tcp"
class RouteNextHopType(str, Enum):
"""
Gets or sets the type of Azure hop the packet should be sent to.
"""
VIRTUAL_NETWORK_GATEWAY = "VirtualNetworkGateway"
VNET_LOCAL = "VnetLocal"
INTERNET = "Internet"
VIRTUAL_APPLIANCE = "VirtualAppliance"
NONE = "None"
class SecurityRuleAccess(str, Enum):
"""
    Gets or sets whether network traffic is allowed or denied. Possible values are 'Allow' and 'Deny'
"""
ALLOW = "Allow"
DENY = "Deny"
class SecurityRuleDirection(str, Enum):
"""
    Gets or sets the direction of the rule: Inbound or Outbound. The direction specifies whether the rule will be evaluated on incoming or outgoing traffic.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class SecurityRuleProtocol(str, Enum):
"""
Gets or sets Network protocol this rule applies to. Can be Tcp, Udp or All(*).
"""
TCP = "Tcp"
UDP = "Udp"
ASTERISK = "*"
class ServiceProviderProvisioningState(str, Enum):
"""
Gets or sets ServiceProviderProvisioningState state of the resource
"""
NOT_PROVISIONED = "NotProvisioned"
PROVISIONING = "Provisioning"
PROVISIONED = "Provisioned"
DEPROVISIONING = "Deprovisioning"
class TransportProtocol(str, Enum):
"""
Gets or sets the transport protocol for the external endpoint. Possible values are Udp or Tcp
"""
UDP = "Udp"
TCP = "Tcp"
class VirtualNetworkGatewayConnectionStatus(str, Enum):
"""
Virtual network Gateway connection status
"""
UNKNOWN = "Unknown"
CONNECTING = "Connecting"
CONNECTED = "Connected"
NOT_CONNECTED = "NotConnected"
class VirtualNetworkGatewayConnectionType(str, Enum):
"""
Gateway connection type IPsec/Dedicated/VpnClient/Vnet2Vnet
"""
IPSEC = "IPsec"
VNET2_VNET = "Vnet2Vnet"
EXPRESS_ROUTE = "ExpressRoute"
VPN_CLIENT = "VPNClient"
class VirtualNetworkGatewaySkuName(str, Enum):
"""
Gateway sku name -Basic/HighPerformance/Standard
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
class VirtualNetworkGatewaySkuTier(str, Enum):
"""
Gateway sku tier -Basic/HighPerformance/Standard
"""
BASIC = "Basic"
HIGH_PERFORMANCE = "HighPerformance"
STANDARD = "Standard"
class VirtualNetworkGatewayType(str, Enum):
"""
The type of this virtual network gateway.
"""
VPN = "Vpn"
EXPRESS_ROUTE = "ExpressRoute"
class VpnType(str, Enum):
"""
The type of this virtual network gateway.
"""
POLICY_BASED = "PolicyBased"
ROUTE_BASED = "RouteBased"
| 24.414179 | 265 | 0.683479 |
8d3248d6618818ee429828912e6ae7fbd78e9e3a | 980 | py | Python | easyneuron/neighbours/__init__.py | neuron-ai/easyNeuron | 15454f342092729765ed3428f8b54ec7d0e5e626 | [
"Apache-2.0"
]
| 1 | 2021-12-11T14:04:04.000Z | 2021-12-11T14:04:04.000Z | easyneuron/neighbours/__init__.py | neuron-ai/easyNeuron | 15454f342092729765ed3428f8b54ec7d0e5e626 | [
"Apache-2.0"
]
| null | null | null | easyneuron/neighbours/__init__.py | neuron-ai/easyNeuron | 15454f342092729765ed3428f8b54ec7d0e5e626 | [
"Apache-2.0"
]
| null | null | null | """easyneuron.neighbours offers a variety of tools to accellerate the development and usage of neighbour-based ML algorithms.
Classes
-------
KNNClassifier - K-Nearest-Neighbours classification algorithm
"""
# Copyright 2021 Neuron-AI GitHub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from easyneuron.neighbours.knearest import KNNClassifier | 42.608696 | 126 | 0.696939 |
ff89d6e03cef335fa05a5e68cd8ff1dc5ec7a293 | 33,458 | py | Python | stellar_base/builder.py | shredding/py-stellar-base | 46089e0c27f43fb877c3d105c661b43921094e12 | [
"Apache-2.0"
]
| null | null | null | stellar_base/builder.py | shredding/py-stellar-base | 46089e0c27f43fb877c3d105c661b43921094e12 | [
"Apache-2.0"
]
| null | null | null | stellar_base/builder.py | shredding/py-stellar-base | 46089e0c27f43fb877c3d105c661b43921094e12 | [
"Apache-2.0"
]
| null | null | null | # coding: utf-8
import binascii
import warnings
from .asset import Asset
from .horizon import HORIZON_LIVE, HORIZON_TEST
from .horizon import Horizon
from .keypair import Keypair
from . import memo
from .network import NETWORKS, Network
from . import operation
from .transaction import Transaction
from .transaction_envelope import TransactionEnvelope as Te
from .exceptions import SignatureExistError
from .federation import federation, FederationError
class Builder(object):
"""The :class:`Builder` object, which uses the builder pattern to create
a list of operations in a :class:`Transaction`, ultimately to be submitted
as a :class:`TransactionEnvelope` to the network via Horizon (see
:class:`Horizon`).
:param str secret: The base32 secret seed for the source address.
:param str address: The base32 source address.
:param str horizon_uri: The horizon instance to use for submitting the created
transaction.
:param str network: The network to connect to for verifying and retrieving
additional attributes from. 'PUBLIC' is an alias for 'Public Global Stellar Network ; September 2015',
'TESTNET' is an alias for 'Test SDF Network ; September 2015'. Defaults to TESTNET.
:param sequence: The sequence number to use for submitting this
transaction with (must be the *current* sequence number of the source
account)
:type sequence: int, str
:param int fee: The network base fee is currently set to
100 stroops (0.00001 lumens). Transaction fee is equal to base fee
times number of operations in this transaction.
"""
def __init__(self,
secret=None,
address=None,
horizon_uri=None,
network=None,
sequence=None,
fee=100):
if secret:
self.keypair = Keypair.from_seed(secret)
self.address = self.keypair.address().decode()
else:
self.keypair = None
self.address = None
if address is None and secret is None:
raise Exception('No Stellar address afforded.')
if address is not None and secret is None:
self.address = Keypair.from_address(address).address().decode()
self.keypair = None
if network is None:
self.network = 'TESTNET'
elif network.upper() in NETWORKS:
self.network = network.upper()
else:
self.network = network
if horizon_uri:
self.horizon = Horizon(horizon_uri)
elif self.network == 'PUBLIC':
self.horizon = Horizon(HORIZON_LIVE)
else:
self.horizon = Horizon(HORIZON_TEST)
if sequence:
self.sequence = int(sequence)
elif self.address:
self.sequence = self.get_sequence()
else:
self.sequence = None
self.ops = []
self.time_bounds = None
self.memo = memo.NoneMemo()
self.fee = fee
self.tx = None
self.te = None
def append_op(self, operation):
"""Append an :class:`Operation <stellar_base.operation.Operation>` to
the list of operations.
Add the operation specified if it doesn't already exist in the list of
operations of this :class:`Builder` instance.
:param operation: The operation to append to the list of operations.
:type operation: :class:`Operation`
:return: This builder instance.
"""
if operation not in self.ops:
self.ops.append(operation)
return self
def append_create_account_op(self,
destination,
starting_balance,
source=None):
"""Append a :class:`CreateAccount
<stellar_base.operation.CreateAccount>` operation to the list of
operations.
:param str destination: Account address that is created and funded.
:param str starting_balance: Amount of XLM to send to the newly created
account. This XLM comes from the source account.
:param str source: The source address to deduct funds from to fund the
new account.
:return: This builder instance.
"""
op = operation.CreateAccount(destination, starting_balance, source)
return self.append_op(op)
def append_trust_op(self, destination, code, limit=None, source=None):
"""append_trust_op will be deprecated in the future, use append_change_trust_op instead.
Append a :class:`ChangeTrust <stellar_base.operation.ChangeTrust>`
operation to the list of operations.
:param str destination: The issuer address for the asset.
:param str code: The asset code for the asset.
:param str limit: The limit of the new trustline.
:param str source: The source address to add the trustline to.
:return: This builder instance.
"""
warnings.warn(
"append_trust_op will be deprecated in the future, use append_change_trust_op instead.",
PendingDeprecationWarning
)
return self.append_change_trust_op(asset_code=code, asset_issuer=destination, limit=limit, source=source)
def append_change_trust_op(self, asset_code, asset_issuer, limit=None, source=None):
"""Append a :class:`ChangeTrust <stellar_base.operation.ChangeTrust>`
operation to the list of operations.
:param str asset_issuer: The issuer address for the asset.
:param str asset_code: The asset code for the asset.
:param str limit: The limit of the new trustline.
:param str source: The source address to add the trustline to.
:return: This builder instance.
"""
asset = Asset(asset_code, asset_issuer)
op = operation.ChangeTrust(asset, limit, source)
return self.append_op(op)
def append_payment_op(self,
destination,
amount,
asset_code='XLM',
asset_issuer=None,
source=None):
"""Append a :class:`Payment <stellar_base.operation.Payment>` operation
to the list of operations.
:param str destination: Account address that receives the payment.
:param str amount: The amount of the currency to send in the payment.
:param str asset_code: The asset code for the asset to send.
:param asset_issuer: The address of the issuer of the asset.
:type asset_issuer: str, None
:param str source: The source address of the payment.
:return: This builder instance.
"""
asset = Asset(code=asset_code, issuer=asset_issuer)
op = operation.Payment(destination, asset, amount, source)
return self.append_op(op)
def append_path_payment_op(self,
destination,
send_code,
send_issuer,
send_max,
dest_code,
dest_issuer,
dest_amount,
path,
source=None):
"""Append a :class:`PathPayment <stellar_base.operation.PathPayment>`
operation to the list of operations.
:param str destination: The destination address (Account ID) for the
payment.
:param str send_code: The asset code for the source asset deducted from
the source account.
:param send_issuer: The address of the issuer of the source asset.
:type send_issuer: str, None
:param str send_max: The maximum amount of send asset to deduct
(excluding fees).
:param str dest_code: The asset code for the final destination asset
sent to the recipient.
:param dest_issuer: Account address that receives the payment.
:type dest_issuer: str, None
:param str dest_amount: The amount of destination asset the destination
account receives.
:param list path: A list of asset tuples, each tuple containing a
(asset_code, asset_issuer) for each asset in the path. For the native
asset, `None` is used for the asset_issuer.
:param str source: The source address of the path payment.
:return: This builder instance.
"""
        # path: a list of asset tuples, each containing asset_code and asset_issuer,
        # e.g. [(asset_code, asset_issuer), (asset_code, asset_issuer)]; for the native
        # asset use ('XLM', None)
send_asset = Asset(send_code, send_issuer)
dest_asset = Asset(dest_code, dest_issuer)
assets = []
for p in path:
assets.append(Asset(p[0], p[1]))
op = operation.PathPayment(destination, send_asset, send_max,
dest_asset, dest_amount, assets, source)
return self.append_op(op)
def append_allow_trust_op(self,
trustor,
asset_code,
authorize,
source=None):
"""Append an :class:`AllowTrust <stellar_base.operation.AllowTrust>`
operation to the list of operations.
:param str trustor: The account of the recipient of the trustline.
:param str asset_code: The asset of the trustline the source account
is authorizing. For example, if an anchor wants to allow another
account to hold its USD credit, the type is USD:anchor.
:param bool authorize: Flag indicating whether the trustline is
authorized.
:param str source: The source address that is establishing the trust in
the allow trust operation.
:return: This builder instance.
"""
op = operation.AllowTrust(trustor, asset_code, authorize, source)
return self.append_op(op)
def append_set_options_op(self,
inflation_dest=None,
clear_flags=None,
set_flags=None,
master_weight=None,
low_threshold=None,
med_threshold=None,
high_threshold=None,
home_domain=None,
signer_address=None,
signer_type=None,
signer_weight=None,
source=None):
"""Append a :class:`SetOptions <stellar_base.operation.SetOptions>`
operation to the list of operations.
.. _Accounts:
https://www.stellar.org/developers/guides/concepts/accounts.html
:param str inflation_dest: The address in which to send inflation to on
an :class:`Inflation <stellar_base.operation.Inflation>` operation.
:param int clear_flags: Indicates which flags to clear. For details
about the flags, please refer to Stellar's documentation on
`Accounts`_. The bit mask integer subtracts from the existing flags
of the account. This allows for setting specific bits without
knowledge of existing flags.
:param int set_flags: Indicates which flags to set. For details about
the flags, please refer to Stellar's documentation on `Accounts`_.
The bit mask integer adds onto the existing flags of the account.
This allows for setting specific bits without knowledge of existing
flags.
:param int master_weight: Weight of the master key. This account may
also add other keys with which to sign transactions using the
signer param.
:param int low_threshold: A number from 0-255 representing the
threshold this account sets on all operations it performs that have
a `low threshold
<https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param int med_threshold: A number from 0-255 representing the
threshold this account sets on all operations it performs that have
a `medium threshold
<https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param int high_threshold: A number from 0-255 representing the
threshold this account sets on all operations it performs that have
a `high threshold
<https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
:param str home_domain: Sets the home domain of an account. See
Stellar's documentation on `Federation
<https://www.stellar.org/developers/guides/concepts/federation.html>`_.
:param signer_address: The address of the new signer to add to the
source account.
:type signer_address: str, bytes
:param str signer_type: The type of signer to add to the account. Must
be in ('ed25519PublicKey', 'hashX', 'preAuthTx'). See Stellar's
documentation for `Multi-Sign
<https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_
for more information.
:param int signer_weight: The weight of the signer. If the weight is 0,
the signer will be deleted.
:param str source: The source address for which options are being set.
:return: This builder instance.
"""
op = operation.SetOptions(inflation_dest, clear_flags, set_flags,
master_weight, low_threshold, med_threshold,
high_threshold, home_domain, signer_address,
signer_type, signer_weight, source)
return self.append_op(op)
def append_hashx_signer(self, hashx, signer_weight, source=None):
"""Add a HashX signer to an account.
Add a HashX signer to an account via a :class:`SetOptions
<stellar_base.operation.SetOptions` operation. This is a helper
function for :meth:`append_set_options_op`.
:param hashx: The address of the new hashX signer.
:type hashx: str, bytes
:param int signer_weight: The weight of the new signer.
:param str source: The source account that is adding a signer to its
list of signers.
:return: This builder instance.
"""
return self.append_set_options_op(
signer_address=hashx,
signer_type='hashX',
signer_weight=signer_weight,
source=source)
def append_pre_auth_tx_signer(self,
pre_auth_tx,
signer_weight,
source=None):
"""Add a PreAuthTx signer to an account.
Add a PreAuthTx signer to an account via a :class:`SetOptions
<stellar_base.operation.SetOptions` operation. This is a helper
function for :meth:`append_set_options_op`.
:param pre_auth_tx: The address of the new preAuthTx signer - obtained by calling `hash_meta` on the TransactionEnvelope.
:type pre_auth_tx: str, bytes
:param int signer_weight: The weight of the new signer.
:param str source: The source account that is adding a signer to its
list of signers.
:return: This builder instance.
"""
return self.append_set_options_op(
signer_address=pre_auth_tx,
signer_type='preAuthTx',
signer_weight=signer_weight,
source=source)
def append_manage_offer_op(self,
selling_code,
selling_issuer,
buying_code,
buying_issuer,
amount,
price,
offer_id=0,
source=None):
"""Append a :class:`ManageOffer <stellar_base.operation.ManageOffer>`
operation to the list of operations.
:param str selling_code: The asset code for the asset the offer creator
is selling.
:param selling_issuer: The issuing address for the asset the offer
creator is selling.
:type selling_issuer: str, None
:param str buying_code: The asset code for the asset the offer creator
is buying.
:param buying_issuer: The issuing address for the asset the offer
            creator is buying.
:type buying_issuer: str, None
:param str amount: Amount of the asset being sold. Set to 0 if you want
to delete an existing offer.
:param price: Price of 1 unit of selling in terms of buying. You can pass
in a number as a string or a dict like `{n: numerator, d: denominator}`
:type price: str, dict
:param int offer_id: The ID of the offer. 0 for new offer. Set to
existing offer ID to update or delete.
:param str source: The source address that is managing an offer on
Stellar's distributed exchange.
:return: This builder instance.
"""
selling = Asset(selling_code, selling_issuer)
buying = Asset(buying_code, buying_issuer)
op = operation.ManageOffer(selling, buying, amount, price, offer_id,
source)
return self.append_op(op)
def append_create_passive_offer_op(self,
selling_code,
selling_issuer,
buying_code,
buying_issuer,
amount,
price,
source=None):
"""Append a :class:`CreatePassiveOffer
<stellar_base.operation.CreatePassiveOffer>` operation to the list of
operations.
:param str selling_code: The asset code for the asset the offer creator
is selling.
:param selling_issuer: The issuing address for the asset the offer
creator is selling.
:type selling_issuer: str, None
:param str buying_code: The asset code for the asset the offer creator
is buying.
:param buying_issuer: The issuing address for the asset the offer
            creator is buying.
:type buying_issuer: str, None
:param str amount: Amount of the asset being sold. Set to 0 if you want
to delete an existing offer.
:param price: Price of 1 unit of selling in terms of buying. You can pass
in a number as a string or a dict like `{n: numerator, d: denominator}`
:type price: str, dict
:param str source: The source address that is creating a passive offer
on Stellar's distributed exchange.
:return: This builder instance.
"""
selling = Asset(selling_code, selling_issuer)
buying = Asset(buying_code, buying_issuer)
op = operation.CreatePassiveOffer(selling, buying, amount, price,
source)
return self.append_op(op)
def append_account_merge_op(self, destination, source=None):
"""Append a :class:`AccountMerge
<stellar_base.operation.AccountMerge>` operation to the list of
operations.
        :param str destination: The destination account address that receives
            the remaining XLM balance of the source account.
:param str source: The source address that is being merged into the
destination account.
:return: This builder instance.
"""
op = operation.AccountMerge(destination, source)
return self.append_op(op)
def append_inflation_op(self, source=None):
"""Append a :class:`Inflation
<stellar_base.operation.Inflation>` operation to the list of
operations.
:param str source: The source address that is running the inflation
operation.
:return: This builder instance.
"""
op = operation.Inflation(source)
return self.append_op(op)
def append_manage_data_op(self, data_name, data_value, source=None):
"""Append a :class:`ManageData <stellar_base.operation.ManageData>`
operation to the list of operations.
:param str data_name: String up to 64 bytes long. If this is a new Name
it will add the given name/value pair to the account. If this Name
is already present then the associated value will be modified.
:param data_value: If not present then the existing
Name will be deleted. If present then this value will be set in the
DataEntry. Up to 64 bytes long.
:type data_value: str, bytes, None
:param str source: The source account on which data is being managed.
operation.
:return: This builder instance.
"""
op = operation.ManageData(data_name, data_value, source)
return self.append_op(op)
def append_bump_sequence_op(self, bump_to, source=None):
"""Append a :class:`BumpSequence <stellar_base.operation.BumpSequence>`
operation to the list of operations.
Only available in protocol version 10 and above
:param int bump_to: Sequence number to bump to.
        :param str source: The source address whose sequence number is being
            bumped.
:return: This builder instance.
"""
op = operation.BumpSequence(bump_to, source)
return self.append_op(op)
def add_memo(self, memo):
"""Set the memo for the transaction build by this :class:`Builder`.
:param memo: A memo to add to this transaction.
:type memo: :class:`Memo <stellar_base.memo.Memo>`
:return: This builder instance.
"""
self.memo = memo
return self
def add_text_memo(self, memo_text):
"""Set the memo for the transaction to a new :class:`TextMemo
<stellar_base.memo.TextMemo>`.
:param str memo_text: The text for the memo to add.
:return: This builder instance.
"""
memo_text = memo.TextMemo(memo_text)
return self.add_memo(memo_text)
def add_id_memo(self, memo_id):
"""Set the memo for the transaction to a new :class:`IdMemo
<stellar_base.memo.IdMemo>`.
:param int memo_id: A 64 bit unsigned integer to set as the memo.
:return: This builder instance.
"""
memo_id = memo.IdMemo(memo_id)
return self.add_memo(memo_id)
def add_hash_memo(self, memo_hash):
"""Set the memo for the transaction to a new :class:`HashMemo
<stellar_base.memo.HashMemo>`.
:param memo_hash: A 32 byte hash or hex encoded string to use as the memo.
:type memo_hash: bytes, str
:return: This builder instance.
"""
memo_hash = memo.HashMemo(memo_hash)
return self.add_memo(memo_hash)
def add_ret_hash_memo(self, memo_return):
"""Set the memo for the transaction to a new :class:`RetHashMemo
<stellar_base.memo.RetHashMemo>`.
        :param memo_return: A 32 byte hash or hex encoded string intended to be interpreted as
the hash of the transaction the sender is refunding.
:type memo_return: bytes, str
:return: This builder instance.
"""
memo_return = memo.RetHashMemo(memo_return)
return self.add_memo(memo_return)
def add_time_bounds(self, time_bounds):
"""Add a time bound to this transaction.
Add a UNIX timestamp, determined by ledger time, of a lower and
upper bound of when this transaction will be valid. If a transaction is
submitted too early or too late, it will fail to make it into the
        transaction set. A maxTime equal to 0 means that it is not set.
:param dict time_bounds: A dict that contains a minTime and maxTime attribute
(`{'minTime': 1534392138, 'maxTime': 1534392238}`) representing the
lower and upper bound of when a given transaction will be valid.
:return: This builder instance.
"""
self.time_bounds = time_bounds
return self
def federation_payment(self,
fed_address,
amount,
asset_code='XLM',
asset_issuer=None,
source=None,
allow_http=False):
"""Append a :class:`Payment <stellar_base.operation.Payment>` operation
to the list of operations using federation on the destination address.
Translates the destination stellar address to an account ID via
:func:`federation <stellar_base.federation.federation>`, before
creating a new payment operation via :meth:`append_payment_op`.
:param str fed_address: A Stellar Address that needs to be translated
into a valid account ID via federation.
:param str amount: The amount of the currency to send in the payment.
:param str asset_code: The asset code for the asset to send.
:param str asset_issuer: The address of the issuer of the asset.
:param str source: The source address of the payment.
:param bool allow_http: When set to `True`, connections to insecure http protocol federation servers
will be allowed. Must be set to `False` in production. Default: `False`.
:return: This builder instance.
"""
fed_info = federation(
address_or_id=fed_address, fed_type='name', allow_http=allow_http)
if not fed_info or not fed_info.get('account_id'):
raise FederationError(
'Cannot determine Stellar Address to Account ID translation '
'via Federation server')
self.append_payment_op(fed_info['account_id'], amount, asset_code,
asset_issuer, source)
memo_type = fed_info.get('memo_type')
if memo_type is not None and memo_type in ('text', 'id', 'hash'):
getattr(self, 'add_' + memo_type + '_memo')(fed_info['memo'])
def gen_tx(self):
"""Generate a :class:`Transaction
<stellar_base.transaction.Transaction>` object from the list of
operations contained within this object.
:return: A transaction representing all of the operations that have
been appended to this builder.
:rtype: :class:`Transaction <stellar_base.transaction.Transaction>`
"""
if not self.address:
raise Exception('Transaction does not have any source address')
if not self.sequence:
raise Exception('No sequence is present, maybe not funded?')
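        # The total fee is the per-operation base fee (self.fee) multiplied by the
        # number of appended operations, as described in the class docstring.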
tx = Transaction(
source=self.address,
sequence=self.sequence,
time_bounds=self.time_bounds,
memo=self.memo,
fee=self.fee * len(self.ops),
operations=self.ops)
self.tx = tx
return tx
def gen_te(self):
"""Generate a :class:`TransactionEnvelope
<stellar_base.transaction_envelope.TransactionEnvelope>` around the
generated Transaction via the list of operations in this instance.
:return: A transaction envelope ready to send over the network.
:rtype: :class:`TransactionEnvelope
<stellar_base.transaction_envelope.TransactionEnvelope>`
"""
if self.tx is None:
self.gen_tx()
te = Te(self.tx, network_id=self.network)
if self.te:
te.signatures = self.te.signatures
self.te = te
return te
def gen_xdr(self):
"""Create an XDR object around a newly generated
:class:`TransactionEnvelope
<stellar_base.transaction_envelope.TransactionEnvelope>`.
:return: An XDR object representing a newly created transaction
envelope ready to send over the network.
"""
if self.tx is None:
self.gen_te()
return self.te.xdr()
def gen_compliance_xdr(self):
"""Create an XDR object representing this builder's transaction to be
sent over via the Compliance protocol (notably, with a sequence number
of 0).
Intentionally, the XDR object is returned without any signatures on the
transaction.
See `Stellar's documentation on its Compliance Protocol
<https://www.stellar.org/developers/guides/compliance-protocol.html>`_
for more information.
"""
sequence = self.sequence
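        # The builder sequence is temporarily set to -1 so that the generated
        # Transaction carries the sequence number 0 mentioned above (Transaction
        # appears to add 1 to the supplied value); the original sequence is restored below.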
self.sequence = -1
tx_xdr = self.gen_tx().xdr()
self.sequence = sequence
return tx_xdr
def hash(self):
"""Return a hash for this transaction.
:return: A hash for this transaction.
:rtype: bytes
"""
return self.gen_te().hash_meta()
def hash_hex(self):
"""Return a hex encoded hash for this transaction.
:return: A hex encoded hash for this transaction.
:rtype: str
"""
return binascii.hexlify(self.hash()).decode()
def import_from_xdr(self, xdr):
"""Create a :class:`TransactionEnvelope
<stellar_base.transaction_envelope.TransactionEnvelope>` via an XDR
object.
In addition, sets the fields of this builder (the transaction envelope,
transaction, operations, source, etc.) to all of the fields in the
provided XDR transaction envelope.
:param xdr: The XDR object representing the transaction envelope to
which this builder is setting its state to.
:type xdr: bytes, str
"""
te = Te.from_xdr(xdr)
if self.network.upper() in NETWORKS:
te.network_id = Network(NETWORKS[self.network]).network_id()
else:
te.network_id = Network(self.network).network_id()
self.te = te
self.tx = te.tx # with a different source or not .
self.ops = te.tx.operations
self.address = te.tx.source
self.sequence = te.tx.sequence - 1
time_bounds_in_xdr = te.tx.time_bounds
if time_bounds_in_xdr:
self.time_bounds = {
'maxTime': time_bounds_in_xdr[0].maxTime,
'minTime': time_bounds_in_xdr[0].minTime
}
else:
self.time_bounds = None
self.memo = te.tx.memo
return self
def sign(self, secret=None):
"""Sign the generated :class:`TransactionEnvelope
<stellar_base.transaction_envelope.TransactionEnvelope>` from the list
of this builder's operations.
:param str secret: The secret seed to use if a key pair or secret was
            not provided when this class was originally instantiated, or if
another key is being utilized to sign the transaction envelope.
"""
keypair = self.keypair if not secret else Keypair.from_seed(secret)
self.gen_te()
try:
self.te.sign(keypair)
except SignatureExistError:
raise
def sign_preimage(self, preimage):
"""Sign the generated transaction envelope using a Hash(x) signature.
:param preimage: The value to be hashed and used as a signer on the
transaction envelope.
:type preimage: str, bytes
"""
if self.te is None:
self.gen_te()
try:
self.te.sign_hashX(preimage)
except SignatureExistError:
raise
def submit(self):
"""Submit the generated XDR object of the built transaction envelope to
Horizon.
Sends the generated transaction envelope over the wire via this
builder's :class:`Horizon <stellar_base.horizon.Horizon>` instance.
Note that you'll typically want to sign the transaction before
submitting via the sign methods.
:returns: A dict representing the JSON response from Horizon.
"""
return self.horizon.submit(self.gen_xdr())
def next_builder(self):
"""Create a new builder based off of this one with its sequence number
incremented.
:return: A new Builder instance
:rtype: :class:`Builder`
"""
sequence = self.sequence + 1
next_builder = Builder(
horizon_uri=self.horizon.horizon_uri,
address=self.address,
network=self.network,
sequence=sequence,
fee=self.fee)
next_builder.keypair = self.keypair
return next_builder
def get_sequence(self):
"""Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
"""
if not self.address:
raise ValueError('No address provided')
address = self.horizon.account(self.address)
return int(address.get('sequence')) | 41.002451 | 129 | 0.612828 |
8debb216347154706fa810448fac54aadf7f6272 | 870 | py | Python | src/cms/templatetags/text_filters.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
]
| 14 | 2020-12-03T07:56:30.000Z | 2021-10-30T13:09:50.000Z | integreat_cms/cms/templatetags/text_filters.py | Carlosbogo/integreat-cms | 066f188b138e105e72f5420bc36d25709f25402d | [
"Apache-2.0"
]
| 367 | 2020-11-20T00:34:20.000Z | 2021-12-14T15:20:42.000Z | src/cms/templatetags/text_filters.py | mckinly/cms-django | c9995a3bfab6ee2d02f2406a7f83cf91b7ccfcca | [
"Apache-2.0"
]
| 3 | 2021-02-09T18:46:52.000Z | 2021-12-07T10:41:39.000Z | """
This is a collection of tags and filters for strings.
"""
from django import template
register = template.Library()
@register.filter(name="words")
def words(text):
"""
Split the given text into a list of words, see :meth:`python:str.split`.
:param text: The input string
:type text: str
:return: The list of words in the text
:rtype: list
"""
return text.split()
@register.filter(name="linkcheck_status_filter")
def linkcheck_status_filter(status_message):
"""
Due to a long status entry for a single kind of faulty link,
this filter reduced the output when display in list view
:param status_message: error description
:type status_message: str
:return: a concise message
:rtype: str
"""
if status_message.startswith("Other Error:"):
return "Other Error"
return status_message
| 23.513514 | 76 | 0.687356 |
b37bf34822cdf36a8a5826bc16a0c7348e8f1236 | 2,181 | py | Python | setup.py | momyc/gevent-fastcgi | 4fef82c5a73a24b288d0d6c47bb63ff47921e8dc | [
"MIT"
]
| 11 | 2015-02-25T14:24:40.000Z | 2019-12-06T02:30:23.000Z | setup.py | momyc/gevent-fastcgi | 4fef82c5a73a24b288d0d6c47bb63ff47921e8dc | [
"MIT"
]
| 3 | 2015-12-05T14:20:52.000Z | 2019-12-21T01:00:01.000Z | setup.py | momyc/gevent-fastcgi | 4fef82c5a73a24b288d0d6c47bb63ff47921e8dc | [
"MIT"
]
| 7 | 2015-08-25T16:37:46.000Z | 2021-06-02T12:51:58.000Z | import os
import sys
from setuptools import setup, Extension, find_packages
ext_modules = []
# C speedups are no good for PyPy
if '__pypy__' not in sys.builtin_module_names:
if os.name == "nt":
ext_modules.append(
Extension('gevent_fastcgi.speedups', ['gevent_fastcgi/speedups.c'], libraries=["Ws2_32"]))
else:
ext_modules.append(
Extension('gevent_fastcgi.speedups', ['gevent_fastcgi/speedups.c']))
setup(
name='gevent-fastcgi',
version='1.1.0.0',
description='''FastCGI/WSGI client and server implemented using gevent
library''',
long_description='''
FastCGI/WSGI server implementation using gevent library. No need to
monkeypatch and slow down your favourite FastCGI server in order to make
it "green".
Supports connection multiplexing. Out-of-the-box support for Django and
frameworks that use PasteDeploy including Pylons and Pyramid.
''',
keywords='fastcgi gevent wsgi',
author='Alexander Kulakov',
author_email='[email protected]',
url='http://github.com/momyc/gevent-fastcgi',
packages=find_packages(exclude=('gevent_fastcgi.tests.*',)),
zip_safe=True,
license='MIT',
install_requires=[
"zope.interface>=3.8.0",
"gevent>=0.13.6",
"six",
],
entry_points={
'paste.server_runner': [
'fastcgi = gevent_fastcgi.adapters.paste_deploy:fastcgi_server_runner',
'wsgi = gevent_fastcgi.adapters.paste_deploy:wsgi_server_runner',
'wsgiref = gevent_fastcgi.adapters.paste_deploy:wsgiref_server_runner',
],
},
classifiers=(
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
),
test_suite="tests",
tests_require=['mock'],
ext_modules=ext_modules
)
| 34.619048 | 102 | 0.646492 |
f95e38997da5d346ef3d0f6684f0c86fdb7a050b | 2,723 | py | Python | OpenDataCatalog/opendata/feeds.py | runonthespot/Open-Data-Catalog | 5fca499b383279a511188d5e05190b46d37fdaad | [
"MIT"
]
| 105 | 2015-01-11T19:08:03.000Z | 2022-03-09T12:58:39.000Z | OpenDataCatalog/opendata/feeds.py | runonthespot/Open-Data-Catalog | 5fca499b383279a511188d5e05190b46d37fdaad | [
"MIT"
]
| 3 | 2017-08-23T15:12:39.000Z | 2019-03-06T22:58:35.000Z | OpenDataCatalog/opendata/feeds.py | runonthespot/Open-Data-Catalog | 5fca499b383279a511188d5e05190b46d37fdaad | [
"MIT"
]
| 40 | 2015-05-18T01:25:08.000Z | 2022-03-09T02:14:27.000Z | from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Rss201rev2Feed as Rss2
from django.shortcuts import get_object_or_404
from OpenDataCatalog.opendata.models import Resource, Tag, Idea
class BaseResourceFeed(Feed):
feed_type = Rss2
def item_title(self, item):
return item.name
def item_link(self, item):
return item.get_absolute_url()
def item_description(self, item):
return item.short_description
def item_author_name(self, item):
return item.organization
def item_author_email(self, item):
return item.contact_email
def item_author_link(self, item):
return item.contact_url
def item_categories(self, item):
return item.tags.all()
def item_pubdate(self, item):
return item.created
class ResourcesFeed(BaseResourceFeed):
title = "OpenDataPhilly.org: Resources - All"
link = "/feeds/resources/"
description = "List of resources on OpenDataPhilly.org listed in the order they were added"
description_template = "feeds/resource.html"
feed_type = Rss2
def items(self):
return Resource.objects.order_by('-created')
class UpdatesFeed(BaseResourceFeed):
title = "OpenDataPhilly.org: Resources - Last Updated"
link = "/feeds/updates/"
description = "List of resources on OpenDataPhilly.org listed in the order they were last updated"
description_template = "feeds/resource.html"
feed_type = Rss2
def items(self):
return Resource.objects.order_by('-last_updated')
class IdeasFeed(Feed):
title = "OpenDataPhilly.org: Ideas"
link = "/feeds/ideas/"
description = "List of ideas on OpenDataPhilly.org listed in the order they were added"
description_template = "feeds/idea.html"
feed_type = Rss2
def items(self):
return Idea.objects.order_by('-created_by_date')
def item_title(self, item):
return item.title
def item_link(self, item):
return item.get_absolute_url()
def item_author_name(self, item):
return item.author
def item_description(self, item):
return item.description
class TagFeed(BaseResourceFeed):
description_template = "feeds/resource.html"
def get_object(self, request, tag_id):
return get_object_or_404(Tag, pk=tag_id)
def title(self, obj):
return "OpenDataPhilly.org: Resources in %s" % obj.tag_name
def link(self, obj):
return "/feeds/tag/%i" % obj.id
def description(self, obj):
return "Resources with the tag %s in the order they were added" % obj.tag_name
def items(self, obj):
return Resource.objects.filter(tags=obj).order_by('-created')
| 34.0375 | 102 | 0.696658 |
da5dda673b7fe6e8a6eb580e1e0016e8307a9ee4 | 7,701 | py | Python | tests/test_utils.py | scottyhq/pystac | 6314b1961156b9e99857dd8c5cf99d4f72ef4ac3 | [
"Apache-2.0"
]
| null | null | null | tests/test_utils.py | scottyhq/pystac | 6314b1961156b9e99857dd8c5cf99d4f72ef4ac3 | [
"Apache-2.0"
]
| null | null | null | tests/test_utils.py | scottyhq/pystac | 6314b1961156b9e99857dd8c5cf99d4f72ef4ac3 | [
"Apache-2.0"
]
| null | null | null | import unittest
import os
import json
import ntpath
from datetime import datetime, timezone, timedelta
from pystac import utils
from pystac.utils import (make_relative_href, make_absolute_href, is_absolute_href)
class UtilsTest(unittest.TestCase):
def test_make_relative_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [
('/a/b/c/d/catalog.json', '/a/b/c/catalog.json', './d/catalog.json'),
('/a/b/catalog.json', '/a/b/c/catalog.json', '../catalog.json'),
('/a/catalog.json', '/a/b/c/catalog.json', '../../catalog.json'),
('http://stacspec.org/a/b/c/d/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
'./d/catalog.json'),
('http://stacspec.org/a/b/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
'../catalog.json'),
('http://stacspec.org/a/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
'../../catalog.json'),
('http://stacspec.org/a/catalog.json', 'http://cogeo.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json'),
('http://stacspec.org/a/catalog.json', 'https://stacspec.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json')
]
for source_href, start_href, expected in test_cases:
actual = make_relative_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_make_relative_href_windows(self):
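        # Temporarily point the module's path implementation at ntpath to emulate
        # Windows-style paths; the finally block below restores os.path.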
utils._pathlib = ntpath
try:
# Test cases of (source_href, start_href, expected)
test_cases = [
('C:\\a\\b\\c\\d\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '.\\d\\catalog.json'),
('C:\\a\\b\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '..\\catalog.json'),
('C:\\a\\catalog.json', 'C:\\a\\b\\c\\catalog.json', '..\\..\\catalog.json'),
('a\\b\\c\\catalog.json', 'a\\b\\catalog.json', '.\\c\\catalog.json'),
('a\\b\\catalog.json', 'a\\b\\c\\catalog.json', '..\\catalog.json'),
('http://stacspec.org/a/b/c/d/catalog.json',
'http://stacspec.org/a/b/c/catalog.json', './d/catalog.json'),
('http://stacspec.org/a/b/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
'../catalog.json'),
('http://stacspec.org/a/catalog.json', 'http://stacspec.org/a/b/c/catalog.json',
'../../catalog.json'),
('http://stacspec.org/a/catalog.json', 'http://cogeo.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json'),
('http://stacspec.org/a/catalog.json', 'https://stacspec.org/a/b/c/catalog.json',
'http://stacspec.org/a/catalog.json')
]
for source_href, start_href, expected in test_cases:
actual = make_relative_href(source_href, start_href)
self.assertEqual(actual, expected)
finally:
utils._pathlib = os.path
def test_make_absolute_href(self):
# Test cases of (source_href, start_href, expected)
test_cases = [('item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./item.json', '/a/b/c/catalog.json', '/a/b/c/item.json'),
('./z/item.json', '/a/b/c/catalog.json', '/a/b/c/z/item.json'),
('../item.json', '/a/b/c/catalog.json', '/a/b/item.json'),
('item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/item.json'),
('./item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/item.json'),
('./z/item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/z/item.json'),
('../item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/item.json')]
for source_href, start_href, expected in test_cases:
actual = make_absolute_href(source_href, start_href)
self.assertEqual(actual, expected)
def test_make_absolute_href_windows(self):
utils._pathlib = ntpath
try:
# Test cases of (source_href, start_href, expected)
test_cases = [('item.json', 'C:\\a\\b\\c\\catalog.json', 'c:\\a\\b\\c\\item.json'),
('.\\item.json', 'C:\\a\\b\\c\\catalog.json', 'c:\\a\\b\\c\\item.json'),
('.\\z\\item.json', 'Z:\\a\\b\\c\\catalog.json',
'z:\\a\\b\\c\\z\\item.json'),
('..\\item.json', 'a:\\a\\b\\c\\catalog.json', 'a:\\a\\b\\item.json'),
('item.json', 'HTTPS://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/item.json'),
('./item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/item.json'),
('./z/item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/c/z/item.json'),
('../item.json', 'https://stacspec.org/a/b/c/catalog.json',
'https://stacspec.org/a/b/item.json')]
for source_href, start_href, expected in test_cases:
actual = make_absolute_href(source_href, start_href)
self.assertEqual(actual, expected)
finally:
utils._pathlib = os.path
def test_is_absolute_href(self):
# Test cases of (href, expected)
test_cases = [('item.json', False), ('./item.json', False), ('../item.json', False),
('/item.json', True), ('http://stacspec.org/item.json', True)]
for href, expected in test_cases:
actual = is_absolute_href(href)
self.assertEqual(actual, expected)
def test_is_absolute_href_windows(self):
utils._pathlib = ntpath
try:
# Test cases of (href, expected)
test_cases = [('item.json', False), ('.\\item.json', False), ('..\\item.json', False),
('c:\\item.json', True), ('http://stacspec.org/item.json', True)]
for href, expected in test_cases:
actual = is_absolute_href(href)
self.assertEqual(actual, expected)
finally:
utils._pathlib = os.path
def test_datetime_to_str(self):
cases = (
('timezone naive, assume utc', datetime(2000, 1, 1), '2000-01-01T00:00:00Z'),
('timezone aware, utc', datetime(2000, 1, 1,
tzinfo=timezone.utc), '2000-01-01T00:00:00Z'),
('timezone aware, utc -7', datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=-7))),
'2000-01-01T00:00:00-07:00'),
)
for title, dt, expected in cases:
with self.subTest(title=title):
got = utils.datetime_to_str(dt)
self.assertEqual(expected, got)
def test_geojson_bbox(self):
# Use sample Geojson from https://en.wikipedia.org/wiki/GeoJSON
with open('tests/data-files/geojson/sample.geojson') as sample_geojson:
all_features = json.load(sample_geojson)
geom_dicts = [f['geometry'] for f in all_features['features']]
for geom in geom_dicts:
got = utils.geometry_to_bbox(geom)
self.assertNotEqual(got, None)
| 51 | 100 | 0.529152 |
91dd079c4f75acbef563eaac8083f9d812797397 | 5,126 | py | Python | aggregators/brute.py | LPD-EPFL/DifferentialByzantine | 132d4feee305126c672b4f06198287daa615ec35 | [
"MIT"
]
| 2 | 2021-10-18T02:56:19.000Z | 2021-11-13T02:31:15.000Z | aggregators/brute.py | IKACE/DifferentialByzantine-1 | 809fd6e070fedeb87a6dbff6f883e93e3c5c8e09 | [
"MIT"
]
| null | null | null | aggregators/brute.py | IKACE/DifferentialByzantine-1 | 809fd6e070fedeb87a6dbff6f883e93e3c5c8e09 | [
"MIT"
]
| 1 | 2021-12-04T21:58:48.000Z | 2021-12-04T21:58:48.000Z | # coding: utf-8
###
# @file brute.py
# @author Sébastien Rouault <[email protected]>
#
# @section LICENSE
#
# Copyright © 2018-2021 École Polytechnique Fédérale de Lausanne (EPFL).
# See LICENSE file.
#
# @section DESCRIPTION
#
# Brute GAR.
###
import tools
from . import register
import itertools
import math
import torch
# Optional 'native' module
try:
import native
except ImportError:
native = None
# ---------------------------------------------------------------------------- #
# Brute GAR
def _compute_selection(gradients, f, **kwargs):
""" Brute rule.
Args:
gradients Non-empty list of gradients to aggregate
f Number of Byzantine gradients to tolerate
... Ignored keyword-arguments
Returns:
Selection index set
"""
n = len(gradients)
# Compute all pairwise distances
distances = [0] * (n * (n - 1) // 2)
for i, (x, y) in enumerate(tools.pairwise(tuple(range(n)))):
distances[i] = gradients[x].sub(gradients[y]).norm().item()
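  # The flat layout below assumes tools.pairwise enumerates pairs (x, y) with x < y in
  # itertools.combinations order, so pair (x, y) sits at index (2 * n - x - 3) * x // 2 + y - 1;
  # e.g. with n = 4, pairs (0,1),(0,2),(0,3),(1,2),(1,3),(2,3) map to indices 0..5.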
# Select the set of smallest diameter
sel_iset = None
sel_diam = None
for cur_iset in itertools.combinations(range(n), n - f):
# Compute the current diameter (max of pairwise distances)
cur_diam = 0.
for x, y in tools.pairwise(cur_iset):
# Get distance between these two gradients ("magic" formula valid since x < y)
cur_dist = distances[(2 * n - x - 3) * x // 2 + y - 1]
# Check finite distance (non-Byzantine gradient must only contain finite coordinates), drop set if non-finite
if not math.isfinite(cur_dist):
break
# Check if new maximum
if cur_dist > cur_diam:
cur_diam = cur_dist
else:
# Check if new selected diameter
if sel_iset is None or cur_diam < sel_diam:
sel_iset = cur_iset
sel_diam = cur_diam
# Return the selected gradients
assert sel_iset is not None, "Too many non-finite gradients: a non-Byzantine gradient must only contain finite coordinates"
return sel_iset
def aggregate(gradients, f, **kwargs):
""" Brute rule.
Args:
gradients Non-empty list of gradients to aggregate
f Number of Byzantine gradients to tolerate
... Ignored keyword-arguments
Returns:
Aggregated gradient
"""
sel_iset = _compute_selection(gradients, f, **kwargs)
return sum(gradients[i] for i in sel_iset).div_(len(gradients) - f)
def aggregate_native(gradients, f, **kwargs):
""" Brute rule.
Args:
gradients Non-empty list of gradients to aggregate
f Number of Byzantine gradients to tolerate
... Ignored keyword-arguments
Returns:
Aggregated gradient
"""
return native.brute.aggregate(gradients, f)
def check(gradients, f, **kwargs):
""" Check parameter validity for Brute rule.
Args:
gradients Non-empty list of gradients to aggregate
f Number of Byzantine gradients to tolerate
... Ignored keyword-arguments
Returns:
None if valid, otherwise error message string
"""
if not isinstance(gradients, list) or len(gradients) < 1:
return "Expected a list of at least one gradient to aggregate, got %r" % gradients
if not isinstance(f, int) or f < 1 or len(gradients) < 2 * f + 1:
return "Invalid number of Byzantine gradients to tolerate, got f = %r, expected 1 ≤ f ≤ %d" % (f, (len(gradients) - 1) // 2)
def upper_bound(n, f, d):
""" Compute the theoretical upper bound on the ratio non-Byzantine standard deviation / norm to use this rule.
Args:
n Number of workers (Byzantine + non-Byzantine)
f Expected number of Byzantine workers
d Dimension of the gradient space
Returns:
Theoretical upper-bound
"""
return (n - f) / (math.sqrt(8) * f)
def influence(honests, attacks, f, **kwargs):
""" Compute the ratio of accepted Byzantine gradients.
Args:
honests Non-empty list of honest gradients to aggregate
attacks List of attack gradients to aggregate
f Number of Byzantine gradients to tolerate
m Optional number of averaged gradients for Multi-Krum
... Ignored keyword-arguments
Returns:
Ratio of accepted
"""
gradients = honests + attacks
# Compute the selection set
sel_iset = _compute_selection(gradients, f, **kwargs)
# Compute the influence ratio
count = 0
for i in sel_iset:
gradient = gradients[i]
for attack in attacks:
if gradient is attack:
count += 1
break
return count / (len(gradients) - f)
# ---------------------------------------------------------------------------- #
# GAR registering
# Register aggregation rule (pytorch version)
method_name = "brute"
register(method_name, aggregate, check, upper_bound=upper_bound, influence=influence)
# Register aggregation rule (native version, if available)
if native is not None:
native_name = method_name
method_name = "native-" + method_name
if native_name in dir(native):
register(method_name, aggregate_native, check, upper_bound)
else:
tools.warning("GAR %r could not be registered since the associated native module %r is unavailable" % (method_name, native_name))
| 32.649682 | 133 | 0.665431 |
85f36bdd9ab27d953dfceee3235135b8a975b712 | 762 | py | Python | package/standalone_kr_usn/lib/hostap/script/wpas/halow_sta.py | teledatics/nrc7292_sdk | d0ba3f17e1bef3d6fec7370e7f0ffa77db56e3a4 | [
"MIT"
]
| 7 | 2020-07-20T03:58:40.000Z | 2022-03-15T13:29:18.000Z | package/standalone_kr_usn/lib/hostap/script/wpas/halow_sta.py | teledatics/nrc7292_sdk | d0ba3f17e1bef3d6fec7370e7f0ffa77db56e3a4 | [
"MIT"
]
| 3 | 2021-07-16T12:39:36.000Z | 2022-02-02T18:19:51.000Z | package/standalone_kr_usn/lib/hostap/script/wpas/halow_sta.py | teledatics/nrc7292_sdk | d0ba3f17e1bef3d6fec7370e7f0ffa77db56e3a4 | [
"MIT"
]
| 4 | 2020-09-19T18:03:04.000Z | 2022-02-02T13:17:34.000Z | #!/usr/bin/python
import os
import time
import commands
print "NRC STA setting for HaLow ..."
print "[0] rmmod"
os.system("sudo killall -9 wpa_supplicant")
os.system("sudo rmmod nrc")
print "[1] insmod"
os.system("sudo insmod ./nrc.ko power_save=1 fw_name=uni_s1g.bin")
time.sleep(5)
print "[2] set trx gain"
os.system('python /home/pi/nrc_pkg/python/shell.py run --cmd="phy rxgain 85"')
time.sleep(1)
os.system('python /home/pi/nrc_pkg/python/shell.py run --cmd="phy txgain 1"')
time.sleep(1)
print "[3] wpa_supplicant"
os.system("sudo wpa_supplicant -iwlan0 -c ./US/sta_halow_open.conf -dddd &")
time.sleep(3)
print "[4] show version"
os.system('python /home/pi/nrc_pkg/python/shell.py run --cmd="show version"')
print "HaLow STA ready"
time.sleep(1)
| 23.090909 | 78 | 0.716535 |
708abc0c59f58cdf1238639cb90b24b53cd4ac52 | 3,043 | py | Python | examples/repost_best_photos_from_users.py | vistar/instabot | 078606725b71fcc23081b205c6acecbf487024ed | [
"Apache-2.0"
]
| null | null | null | examples/repost_best_photos_from_users.py | vistar/instabot | 078606725b71fcc23081b205c6acecbf487024ed | [
"Apache-2.0"
]
| null | null | null | examples/repost_best_photos_from_users.py | vistar/instabot | 078606725b71fcc23081b205c6acecbf487024ed | [
"Apache-2.0"
]
| null | null | null | """
instabot example
Workflow:
Repost best photos from users to your account
By default bot checks username_database.txt
The file should contain one username per line!
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
from instabot.bot.bot_support import read_list_from_file
USERNAME_DATABASE = 'username_database.txt'
POSTED_MEDIAS = 'posted_medias.txt'
def repost_best_photos(bot, users, amount=1):
medias = get_not_used_medias_from_users(bot, users)
medias = sort_best_medias(bot, medias, amount)
for media in tqdm(medias, desc='Reposting photos'):
repost_photo(bot, media)
def sort_best_medias(bot, media_ids, amount=1):
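    # Rank candidate posts by like count, then comment count, and keep the ids of the
    # top `amount` medias.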
best_medias = [bot.get_media_info(media)[0] for media in tqdm(media_ids, desc='Getting media info')]
best_medias = sorted(best_medias, key=lambda x: (x['like_count'], x['comment_count']), reverse=True)
return [best_media['pk'] for best_media in best_medias[:amount]]
def get_not_used_medias_from_users(bot, users=None, users_path=USERNAME_DATABASE):
if not users:
users = read_list_from_file(users_path)
users = map(str, users)
total_medias = []
for user in users:
medias = bot.get_user_medias(user, filtration=False)
medias = [media for media in medias if not exists_in_posted_medias(media)]
total_medias.extend(medias)
return total_medias
def exists_in_posted_medias(new_media_id, path=POSTED_MEDIAS):
medias = read_list_from_file(path)
return str(new_media_id) in medias
def update_posted_medias(new_media_id, path=POSTED_MEDIAS):
medias = read_list_from_file(path)
medias.append(str(new_media_id))
with open(path, 'w') as file:
file.writelines('\n'.join(medias))
return True
def repost_photo(bot, new_media_id, path=POSTED_MEDIAS):
if exists_in_posted_medias(new_media_id, path):
bot.logger.warning("Media {0} was uploaded earlier".format(new_media_id))
return False
photo_path = bot.download_photo(new_media_id, description=True)
if not photo_path:
return False
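    # download_photo(..., description=True) is expected to also save the original caption
    # as a .txt file next to the image; that text is reused as the caption for the re-upload.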
with open(photo_path[:-3] + 'txt', 'r') as f:
text = ''.join(f.readlines())
if bot.upload_photo(photo_path, text):
update_posted_medias(new_media_id, path)
bot.logger.info('Media_id {0} is saved in {1}'.format(new_media_id, path))
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('-file', type=str, help="users filename")
parser.add_argument('-amount', type=int, help="amount", default=1)
parser.add_argument('users', type=str, nargs='*', help='users')
args = parser.parse_args()
bot = Bot()
bot.login()
users = None
if args.users:
users = args.users
elif args.file:
users = read_list_from_file(args.file)
repost_best_photos(bot, users, args.amount)
| 32.031579 | 104 | 0.720342 |
98da95c26c49e00f421e2acb4249e0c3f9032be5 | 662 | py | Python | auto/www/app.py | mawentao119/bigface | e6f14809b9dc0785ad7cdeccd38ed87de2b925a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| null | null | null | auto/www/app.py | mawentao119/bigface | e6f14809b9dc0785ad7cdeccd38ed87de2b925a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| 1 | 2021-08-13T10:43:31.000Z | 2021-08-13T10:43:31.000Z | auto/www/app.py | mawentao119/bigface | e6f14809b9dc0785ad7cdeccd38ed87de2b925a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
Modified by [email protected]
"""
from flask import Flask
from flask_apscheduler import APScheduler
from auto.configuration import config
scheduler = APScheduler()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
scheduler.init_app(app)
scheduler.start()
# for blueprints
from .blueprints import routes as routes_blueprint
app.register_blueprint(routes_blueprint)
from .api import api_bp as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
return app
| 20.060606 | 63 | 0.73716 |
77af8727abc9a2267e96a85e53b655ccf594f6af | 3,500 | py | Python | espnet2/bin/aggregate_stats_dirs.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
]
| 1 | 2021-12-22T06:04:44.000Z | 2021-12-22T06:04:44.000Z | espnet2/bin/aggregate_stats_dirs.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
]
| null | null | null | espnet2/bin/aggregate_stats_dirs.py | undeadyequ/espnet | 8c3f85ce695153abcb9cf365180b1d7554ad565e | [
"Apache-2.0"
]
| 1 | 2022-01-07T02:29:05.000Z | 2022-01-07T02:29:05.000Z | #!/usr/bin/env python3
import argparse
import logging
from pathlib import Path
import sys
from typing import Iterable
from typing import Union
import numpy as np
from espnet.utils.cli_utils import get_commandline_args
def aggregate_stats_dirs(
input_dir: Iterable[Union[str, Path]], output_dir: Union[str, Path], log_level: str,
):
logging.basicConfig(
level=log_level,
        format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
input_dirs = [Path(p) for p in input_dir]
output_dir = Path(output_dir)
for mode in ["train", "valid"]:
with (input_dirs[0] / mode / "batch_keys").open("r", encoding="utf-8") as f:
batch_keys = [line.strip() for line in f if line.strip() != ""]
with (input_dirs[0] / mode / "stats_keys").open("r", encoding="utf-8") as f:
stats_keys = [line.strip() for line in f if line.strip() != ""]
(output_dir / mode).mkdir(parents=True, exist_ok=True)
for key in batch_keys:
with (output_dir / mode / f"{key}_shape").open(
"w", encoding="utf-8"
) as fout:
for idir in input_dirs:
with (idir / mode / f"{key}_shape").open(
"r", encoding="utf-8"
) as fin:
# Read to the last in order to sort keys
# because the order can be changed if num_workers>=1
lines = fin.readlines()
lines = sorted(lines, key=lambda x: x.split()[0])
for line in lines:
fout.write(line)
for key in stats_keys:
sum_stats = None
for idir in input_dirs:
stats = np.load(idir / mode / f"{key}_stats.npz")
if sum_stats is None:
sum_stats = dict(**stats)
else:
for k in stats:
sum_stats[k] += stats[k]
np.savez(output_dir / mode / f"{key}_stats.npz", **sum_stats)
# if --write_collected_feats=true
p = Path(mode) / "collect_feats" / f"{key}.scp"
scp = input_dirs[0] / p
if scp.exists():
(output_dir / p).parent.mkdir(parents=True, exist_ok=True)
with (output_dir / p).open("w", encoding="utf-8") as fout:
for idir in input_dirs:
with (idir / p).open("r", encoding="utf-8") as fin:
for line in fin:
fout.write(line)
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="Aggregate statistics directories into one directory",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
        choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument("--input_dir", action="append", help="Input directories")
parser.add_argument("--output_dir", required=True, help="Output directory")
return parser
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
aggregate_stats_dirs(**kwargs)
if __name__ == "__main__":
main()
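# Illustrative command line (directory names are hypothetical):
#   python -m espnet2.bin.aggregate_stats_dirs \
#       --input_dir exp/stats_split1 --input_dir exp/stats_split2 \
#       --output_dir exp/stats_all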
| 35.353535 | 88 | 0.550571 |
d51ea929be8aa572c050a6c917ad73fe581e1e26 | 3,143 | py | Python | rest_framework/pagination.py | 0x64746b/django-rest-framework | 8d83ff8e6c8513d0a88d6b1fecb34ed86f1e2085 | [
"Unlicense"
]
| 87 | 2016-01-24T16:41:02.000Z | 2021-12-20T21:13:24.000Z | rest_framework/pagination.py | laborautonomo/django-rest-framework | 299a8347e8ef448eefc611eebfe80d7e142ceaa1 | [
"Unlicense"
]
| 16 | 2020-02-11T23:19:19.000Z | 2022-03-11T23:33:40.000Z | rest_framework/pagination.py | laborautonomo/django-rest-framework | 299a8347e8ef448eefc611eebfe80d7e142ceaa1 | [
"Unlicense"
]
| 57 | 2016-02-02T05:46:14.000Z | 2021-03-21T15:46:06.000Z | """
Pagination serializers determine the structure of the output that should
be used for paginated responses.
"""
from __future__ import unicode_literals
from rest_framework import serializers
from rest_framework.templatetags.rest_framework import replace_query_param
class NextPageField(serializers.Field):
"""
Field that returns a link to the next page in paginated results.
"""
page_field = 'page'
def to_native(self, value):
if not value.has_next():
return None
page = value.next_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class PreviousPageField(serializers.Field):
"""
Field that returns a link to the previous page in paginated results.
"""
page_field = 'page'
def to_native(self, value):
if not value.has_previous():
return None
page = value.previous_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class DefaultObjectSerializer(serializers.Field):
"""
If no object serializer is specified, then this serializer will be applied
as the default.
"""
def __init__(self, source=None, context=None):
# Note: Swallow context kwarg - only required for eg. ModelSerializer.
super(DefaultObjectSerializer, self).__init__(source=source)
class PaginationSerializerOptions(serializers.SerializerOptions):
"""
An object that stores the options that may be provided to a
pagination serializer by using the inner `Meta` class.
Accessible on the instance as `serializer.opts`.
"""
def __init__(self, meta):
super(PaginationSerializerOptions, self).__init__(meta)
self.object_serializer_class = getattr(meta, 'object_serializer_class',
DefaultObjectSerializer)
class BasePaginationSerializer(serializers.Serializer):
"""
A base class for pagination serializers to inherit from,
to make implementing custom serializers more easy.
"""
_options_class = PaginationSerializerOptions
results_field = 'results'
def __init__(self, *args, **kwargs):
"""
Override init to add in the object serializer field on-the-fly.
"""
super(BasePaginationSerializer, self).__init__(*args, **kwargs)
results_field = self.results_field
object_serializer = self.opts.object_serializer_class
if 'context' in kwargs:
context_kwarg = {'context': kwargs['context']}
else:
context_kwarg = {}
self.fields[results_field] = object_serializer(source='object_list', **context_kwarg)
class PaginationSerializer(BasePaginationSerializer):
"""
A default implementation of a pagination serializer.
"""
count = serializers.Field(source='paginator.count')
next = NextPageField(source='*')
previous = PreviousPageField(source='*')
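# Illustrative usage (not part of the original module), assuming a Django
# Paginator page object and a request available in the serializer context:
#
#   from django.core.paginator import Paginator
#   page = Paginator(queryset, per_page=20).page(1)
#   serializer = PaginationSerializer(instance=page, context={'request': request})
#   serializer.data  # {'count': ..., 'next': ..., 'previous': ..., 'results': [...]}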
| 33.084211 | 93 | 0.686287 |
cb257908ca99a3923ad2354661946b9e54779d7d | 1,143 | py | Python | contrib/filter-lcov.py | j2ibeo/askalcoin | 56cbf4ad39e8b43c65dad647531b5e573584ac91 | [
"MIT"
]
| null | null | null | contrib/filter-lcov.py | j2ibeo/askalcoin | 56cbf4ad39e8b43c65dad647531b5e573584ac91 | [
"MIT"
]
| null | null | null | contrib/filter-lcov.py | j2ibeo/askalcoin | 56cbf4ad39e8b43c65dad647531b5e573584ac91 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Askalcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
parser = argparse.ArgumentParser(description='Remove the coverage data from a tracefile for all files matching the pattern.')
parser.add_argument('--pattern', '-p', action='append', help='the pattern of files to remove', required=True)
parser.add_argument('tracefile', help='the tracefile to remove the coverage data from')
parser.add_argument('outfile', help='filename for the output to be written to')
args = parser.parse_args()
tracefile = args.tracefile
pattern = args.pattern
outfile = args.outfile
in_remove = False
with open(tracefile, 'r', encoding="utf8") as f:
with open(outfile, 'w', encoding="utf8") as wf:
for line in f:
for p in pattern:
if line.startswith("SF:") and p in line:
in_remove = True
if not in_remove:
wf.write(line)
if line == 'end_of_record\n':
in_remove = False
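# Illustrative invocation (file and pattern names are hypothetical): strip
# coverage data for third-party and test sources from an lcov tracefile:
#   ./filter-lcov.py -p leveldb -p test/ total.coverage filtered.coverage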
| 39.413793 | 125 | 0.67979 |
4f0d585475669677a437d5567ad7e050c51db64a | 804 | py | Python | invalidation.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
]
| null | null | null | invalidation.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
]
| null | null | null | invalidation.py | tored11/DRF-redis-cache-decorator | 8653f8afce35554e1bc00a5158871b152c5535b0 | [
"MIT"
]
| null | null | null | from django.core.cache import cache
from .key_construction import get_user_cache_key, get_model_cache_key
def invalidate_model_cache(model):
"""
Invalidates all model related caches.
:param model: Model class
"""
model_name = get_model_cache_key(model)
invalidate_cache_key_pattern(model_name)
def invalidate_cache_key_pattern(cache_key):
"""
Invalidates all patterns of specific cache_key
:param cache_key: cache key to invalidate, including those,
which have key's pattern
"""
cache.delete_pattern(f'*{cache_key}*')
def invalidate_user_related_cache(user):
"""
Invalidates all user related cache
:param user: Specific user instance
"""
user_cache_key = get_user_cache_key(user)
invalidate_cache_key_pattern(user_cache_key)
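# Illustrative wiring (not part of the original module): these helpers are
# typically called from model signals; "Article" is a hypothetical model.
#
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=Article)
#   def clear_article_cache(sender, instance, **kwargs):
#       invalidate_model_cache(sender)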
| 25.935484 | 69 | 0.743781 |
b9e245c35f72e903156ccaee8e7d434bfd9b864d | 3,630 | py | Python | models/rscnn_ssn_cls.py | v-wewei/Relation-Shape-CNN | 04c114d6eaf981736721f0013dab4fc3c91ae05f | [
"MIT"
]
| 421 | 2019-04-17T01:52:40.000Z | 2022-03-23T09:42:54.000Z | models/rscnn_ssn_cls.py | v-wewei/Relation-Shape-CNN | 04c114d6eaf981736721f0013dab4fc3c91ae05f | [
"MIT"
]
| 45 | 2019-04-19T02:35:53.000Z | 2022-02-15T10:18:17.000Z | models/rscnn_ssn_cls.py | v-wewei/Relation-Shape-CNN | 04c114d6eaf981736721f0013dab4fc3c91ae05f | [
"MIT"
]
| 84 | 2019-04-17T16:20:45.000Z | 2022-03-29T07:55:18.000Z | import os, sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "../utils"))
import torch
import torch.nn as nn
from torch.autograd import Variable
import pytorch_utils as pt_utils
from pointnet2_modules import PointnetSAModule, PointnetSAModuleMSG
import numpy as np
# Relation-Shape CNN: Single-Scale Neighborhood
class RSCNN_SSN(nn.Module):
r"""
    Relation-Shape CNN with single-scale neighborhood grouping
    Classification network that predicts a class label for each input point cloud
Parameters
----------
num_classes: int
        Number of semantic classes to predict over -- size of the softmax classifier output
input_channels: int = 6
Number of input channels in the feature descriptor for each point. If the point cloud is Nx9, this
value should be 6 as in an Nx9 point cloud, 3 of the channels are xyz, and 6 are feature descriptors
use_xyz: bool = True
Whether or not to use the xyz position of a point as a feature
"""
def __init__(self, num_classes, input_channels=0, relation_prior=1, use_xyz=True):
super().__init__()
self.SA_modules = nn.ModuleList()
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=512,
radii=[0.23],
nsamples=[48],
mlps=[[input_channels, 128]],
first_layer=True,
use_xyz=use_xyz,
relation_prior=relation_prior
)
)
self.SA_modules.append(
PointnetSAModuleMSG(
npoint=128,
radii=[0.32],
nsamples=[64],
mlps=[[128, 512]],
use_xyz=use_xyz,
relation_prior=relation_prior
)
)
self.SA_modules.append(
# global convolutional pooling
PointnetSAModule(
nsample = 128,
mlp=[512, 1024],
use_xyz=use_xyz
)
)
self.FC_layer = nn.Sequential(
pt_utils.FC(1024, 512, activation=nn.ReLU(inplace=True), bn=True),
nn.Dropout(p=0.5),
pt_utils.FC(512, 256, activation=nn.ReLU(inplace=True), bn=True),
nn.Dropout(p=0.5),
pt_utils.FC(256, num_classes, activation=None)
)
def _break_up_pc(self, pc):
xyz = pc[..., 0:3].contiguous()
features = (
pc[..., 3:].transpose(1, 2).contiguous()
if pc.size(-1) > 3 else None
)
return xyz, features
def forward(self, pointcloud: torch.cuda.FloatTensor):
r"""
Forward pass of the network
Parameters
----------
pointcloud: Variable(torch.cuda.FloatTensor)
(B, N, 3 + input_channels) tensor
Point cloud to run predicts on
Each point in the point-cloud MUST
be formated as (x, y, z, features...)
"""
xyz, features = self._break_up_pc(pointcloud)
for module in self.SA_modules:
xyz, features = module(xyz, features)
return self.FC_layer(features.squeeze(-1))
if __name__ == "__main__":
sim_data = Variable(torch.rand(32, 2048, 6))
sim_data = sim_data.cuda()
sim_cls = Variable(torch.ones(32, 16))
sim_cls = sim_cls.cuda()
seg = RSCNN_SSN(num_classes=50, input_channels=3, use_xyz=True)
seg = seg.cuda()
out = seg(sim_data, sim_cls)
print('seg', out.size()) | 33.302752 | 112 | 0.571625 |
68461dca6710b158fb77a6647536d4434a21c133 | 3,477 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/python-mega-algo/scripts/validate_solutions.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
]
| 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | scripts/validate_solutions.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
]
| 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | scripts/validate_solutions.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
]
| 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | #!/usr/bin/env python3
import hashlib
import importlib.util
import json
import os
import pathlib
from types import ModuleType
import pytest
import requests
PROJECT_EULER_DIR_PATH = pathlib.Path.cwd().joinpath("project_euler")
PROJECT_EULER_ANSWERS_PATH = pathlib.Path.cwd().joinpath(
"scripts", "project_euler_answers.json"
)
with open(PROJECT_EULER_ANSWERS_PATH) as file_handle:
PROBLEM_ANSWERS: dict[str, str] = json.load(file_handle)
def convert_path_to_module(file_path: pathlib.Path) -> ModuleType:
"""Converts a file path to a Python module"""
spec = importlib.util.spec_from_file_location(file_path.name, str(file_path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return module
def all_solution_file_paths() -> list[pathlib.Path]:
"""Collects all the solution file path in the Project Euler directory"""
solution_file_paths = []
for problem_dir_path in PROJECT_EULER_DIR_PATH.iterdir():
if problem_dir_path.is_file() or problem_dir_path.name.startswith("_"):
continue
for file_path in problem_dir_path.iterdir():
if file_path.suffix != ".py" or file_path.name.startswith(("_", "test")):
continue
solution_file_paths.append(file_path)
return solution_file_paths
def get_files_url() -> str:
"""Return the pull request number which triggered this action."""
with open(os.environ["GITHUB_EVENT_PATH"]) as file:
event = json.load(file)
return event["pull_request"]["url"] + "/files"
def added_solution_file_path() -> list[pathlib.Path]:
"""Collects only the solution file path which got added in the current
pull request.
This will only be triggered if the script is ran from GitHub Actions.
"""
solution_file_paths = []
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token " + os.environ["GITHUB_TOKEN"],
}
files = requests.get(get_files_url(), headers=headers).json()
for file in files:
filepath = pathlib.Path.cwd().joinpath(file["filename"])
if (
filepath.suffix != ".py"
or filepath.name.startswith(("_", "test"))
or not filepath.name.startswith("sol")
):
continue
solution_file_paths.append(filepath)
return solution_file_paths
def collect_solution_file_paths() -> list[pathlib.Path]:
if os.environ.get("CI") and os.environ.get("GITHUB_EVENT_NAME") == "pull_request":
# Return only if there are any, otherwise default to all solutions
if filepaths := added_solution_file_path():
return filepaths
return all_solution_file_paths()
@pytest.mark.parametrize(
"solution_path",
collect_solution_file_paths(),
ids=lambda path: f"{path.parent.name}/{path.name}",
)
def test_project_euler(solution_path: pathlib.Path) -> None:
"""Testing for all Project Euler solutions"""
# problem_[extract this part] and pad it with zeroes for width 3
problem_number: str = solution_path.parent.name[8:].zfill(3)
expected: str = PROBLEM_ANSWERS[problem_number]
solution_module = convert_path_to_module(solution_path)
answer = str(solution_module.solution()) # type: ignore
answer = hashlib.sha256(answer.encode()).hexdigest()
assert (
answer == expected
), f"Expected solution to {problem_number} to have hash {expected}, got {answer}"
| 35.845361 | 86 | 0.694277 |
cc806f4a0b41e7f3e89bc14ef30b653aaeb3e9cf | 6,825 | py | Python | tf2onnx/optimizer/const_fold_optimizer.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | [
"Apache-2.0"
]
| null | null | null | tf2onnx/optimizer/const_fold_optimizer.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | [
"Apache-2.0"
]
| null | null | null | tf2onnx/optimizer/const_fold_optimizer.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | [
"Apache-2.0"
]
| null | null | null | # SPDX-License-Identifier: Apache-2.0
"""const fold Optimizer.
if op's inputs are all const then do op computation when building the graph to improve performance
for example, input of transpose node is const then we can do transpose statically instead of at runtime
"""
import numpy as np
from .. import utils
from .optimizer_base import GraphOptimizerBase
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
# key is op_type, value is the function to compute outputs
# the schema of function is: inputs are(node, graph), output is a list of constant values.
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class ConstFoldOptimizer(GraphOptimizerBase):
def __init__(self): # pylint: disable=useless-super-delegation
super(ConstFoldOptimizer, self).__init__()
def _optimize(self, graph):
return self._apply_optimization(graph, self._optimize_at_current_graph_level)
def _optimize_at_current_graph_level(self, graph):
graph_changed = True
while graph_changed:
graph_changed = False
ops = graph.get_nodes()
for op in ops:
if self._should_skip(op):
continue
if self._fold_node(op, graph):
graph_changed = True
self.graph_been_opt = True
return graph
@staticmethod
def _should_skip(node):
# only support onnx official op for now, op in other domain is not supported for now
if not utils.is_onnx_domain(node.domain):
return True
if node.is_const() or node.is_graph_input():
return True
skip_type = ["Identity", "DequantizeLinear"]
if node.type in skip_type:
return True
return False
def _fold_node(self, node, graph):
""" if node's input are all const and it's not graph's output then it can be fold.
if node can be fold True will be return indicating that graph is changed
"""
if self._all_inputs_are_const(node.inputs) and not self._is_graph_output(node, graph):
process_func = _func_map.get(node.type, None)
if process_func:
const_outputs = process_func(node, graph)
self._replace_node_with_const(node, graph, const_outputs)
return True
self.logger.debug("need to add function to fold op %s whose op_type is %s", node.name, node.type)
return False
@staticmethod
def compute_const_folding(node, graph):
return _func_map[node.type](node, graph)
@staticmethod
def _all_inputs_are_const(nodes):
return all(node.is_const() for node in nodes if node)
@staticmethod
def _is_graph_output(node, graph):
node_out_set = set(node.output)
graph_out_set = set(graph.outputs)
return node_out_set.intersection(graph_out_set)
@staticmethod
def _replace_node_with_const(node, graph, vals):
utils.make_sure(len(node.output) == len(vals), "length of node outputs and const vals should be same")
for old_input, val in zip(node.output, vals):
const_node = graph.make_const(utils.make_name("const_fold_opt"), val)
graph.set_dtype(const_node.output[0], utils.map_numpy_to_onnx_dtype(val.dtype))
graph.set_shape(const_node.output[0], val.shape)
graph.replace_all_inputs(old_input, const_node.output[0]) # ops=graph.get_nodes()
graph.remove_node(node.name)
@staticmethod
@_register_func("Cast")
def _fold_cast(node, graph):
const_val = node.inputs[0].get_tensor_value(as_list=False)
np_dtype = utils.ONNX_TO_NUMPY_DTYPE[node.get_attr("to").i]
const_val_after_cast = const_val.astype(np_dtype)
return [const_val_after_cast]
@staticmethod
@_register_func("Transpose")
def _fold_transpose(node, graph) -> list:
const_val = node.inputs[0].get_tensor_value(as_list=False)
perm_attr = node.get_attr("perm")
perm = perm_attr.ints if perm_attr else None
const_val_after_trans = const_val.transpose(perm)
return [const_val_after_trans]
@staticmethod
@_register_func("Reshape")
def _fold_reshape(node, graph):
const_val_data = node.inputs[0].get_tensor_value(as_list=False)
const_val_shape = node.inputs[1].get_tensor_value(as_list=True)
data_shape = const_val_data.shape
for i, dim in enumerate(const_val_shape):
if dim == 0:
# In ORT a dim of 0 means the shape stays the same.
const_val_shape[i] = data_shape[i]
const_val_after_trans = const_val_data.reshape(const_val_shape)
return [const_val_after_trans]
@staticmethod
@_register_func("Concat")
def _fold_concat(node, graph):
axis = node.get_attr_value('axis')
res = np.concatenate([inp.get_tensor_value(as_list=False) for inp in node.inputs], axis)
return [res]
@staticmethod
@_register_func("Unsqueeze")
def _fold_unsqueeze(node, graph):
"""
numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
"""
const_val = node.inputs[0].get_tensor_value(as_list=False)
if graph.opset >= 13:
axes = node.inputs[1].get_tensor_value(as_list=True)
else:
axes = list(node.get_attr("axes").ints)
shape_in = const_val.shape
dims_out = len(shape_in) + len(axes)
axes = [i if i >= 0 else i + dims_out for i in axes]
        # calculate the shape of output according to onnx Unsqueeze's spec
# https://github.com/onnx/onnx/blob/main/docs/Operators.md#Unsqueeze
shape_in = iter(shape_in)
shape_out = [None] * dims_out
for ind in axes:
shape_out[ind] = 1
for ind, val in enumerate(shape_out):
if val is None:
shape_out[ind] = next(shape_in)
const_val_after_unsqueeze = const_val.reshape(shape_out)
return [const_val_after_unsqueeze]
@staticmethod
@_register_func("Split")
def _fold_split(node, graph):
data = node.inputs[0].get_tensor_value(as_list=False)
axis = node.get_attr_value('axis', 0)
if len(node.output) == 1:
return [data]
split = node.get_attr_value('split')
if len(node.input) > 1:
split = node.inputs[1].get_tensor_value(as_list=False)
if split is not None:
indices_or_sections = np.cumsum(split[:-1])
else:
indices_or_sections = len(node.output)
return np.split(data, indices_or_sections, axis)
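# Illustrative extension (not part of the original optimizer): a new folding
# rule is registered by decorating a static method with the ONNX op type. A
# hypothetical rule for "Neg" could look like this, defined inside
# ConstFoldOptimizer alongside the rules above:
#
#   @staticmethod
#   @_register_func("Neg")
#   def _fold_neg(node, graph):
#       val = node.inputs[0].get_tensor_value(as_list=False)
#       return [-val]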
| 37.916667 | 111 | 0.653187 |
ca6f67a7cc855cbdafffcebccb10c6deb6c204c1 | 1,897 | py | Python | ivy_tests/test_array_api/array_api_tests/special_cases/test_acosh.py | djl11/ivy | 209f74b5a1a82ca69ad712788ae0469c3f8614d9 | [
"Apache-2.0"
]
| null | null | null | ivy_tests/test_array_api/array_api_tests/special_cases/test_acosh.py | djl11/ivy | 209f74b5a1a82ca69ad712788ae0469c3f8614d9 | [
"Apache-2.0"
]
| null | null | null | ivy_tests/test_array_api/array_api_tests/special_cases/test_acosh.py | djl11/ivy | 209f74b5a1a82ca69ad712788ae0469c3f8614d9 | [
"Apache-2.0"
]
| null | null | null | """
Special cases tests for acosh.
These tests are generated from the special cases listed in the spec.
NOTE: This file is generated automatically by the generate_stubs.py script. Do
not modify it directly.
"""
from ..array_helpers import NaN, assert_exactly_equal, exactly_equal, infinity, less, one, zero
from ..hypothesis_helpers import numeric_arrays
from .._array_module import acosh
from hypothesis import given
#
# @given(numeric_arrays)
# def test_acosh_special_cases_one_arg_equal_1(arg1):
# """
# Special case test for `acosh(x, /)`:
#
# - If `x_i` is `NaN`, the result is `NaN`.
#
# """
# res = acosh(arg1)
# mask = exactly_equal(arg1, NaN(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (NaN(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_acosh_special_cases_one_arg_equal_2(arg1):
# """
# Special case test for `acosh(x, /)`:
#
# - If `x_i` is `1`, the result is `+0`.
#
# """
# res = acosh(arg1)
# mask = exactly_equal(arg1, one(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (zero(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_acosh_special_cases_one_arg_equal_3(arg1):
# """
# Special case test for `acosh(x, /)`:
#
# - If `x_i` is `+infinity`, the result is `+infinity`.
#
# """
# res = acosh(arg1)
# mask = exactly_equal(arg1, infinity(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (infinity(arg1.shape, arg1.dtype))[mask])
#
#
# @given(numeric_arrays)
# def test_acosh_special_cases_one_arg_less(arg1):
# """
# Special case test for `acosh(x, /)`:
#
# - If `x_i` is less than `1`, the result is `NaN`.
#
# """
# res = acosh(arg1)
# mask = less(arg1, one(arg1.shape, arg1.dtype))
# assert_exactly_equal(res[mask], (NaN(arg1.shape, arg1.dtype))[mask])
| 28.313433 | 95 | 0.647865 |
70fd9a74bdbaa067ea3e3f9376e4ac8e51a65416 | 1,057 | py | Python | python/tests/test_plot.py | jsemric/keepsake | d9a7922556748e9e913e83c48e5378b7324eeac7 | [
"Apache-2.0"
]
| 810 | 2021-02-09T09:26:26.000Z | 2022-03-25T14:06:13.000Z | python/tests/test_plot.py | jsemric/keepsake | d9a7922556748e9e913e83c48e5378b7324eeac7 | [
"Apache-2.0"
]
| 347 | 2021-02-08T07:24:29.000Z | 2022-03-31T23:05:29.000Z | python/tests/test_plot.py | jsemric/keepsake | d9a7922556748e9e913e83c48e5378b7324eeac7 | [
"Apache-2.0"
]
| 43 | 2020-10-30T19:55:42.000Z | 2021-01-18T22:41:49.000Z | import datetime
import matplotlib.pyplot as plt
from keepsake.checkpoint import Checkpoint, CheckpointList
from keepsake.experiment import Experiment, ExperimentList
from keepsake.project import Project, init
def test_num_plots(temp_workdir):
with open("keepsake.yaml", "w") as f:
f.write("repository: file://.keepsake/")
experiment = init(path=".", params={"learning_rate": 0.1, "num_epochs": 1},)
experiment.checkpoint(
path=".",
step=1,
metrics={"loss": 1.1836304664611816, "accuracy": 0.3333333432674408},
primary_metric=("loss", "minimize"),
)
experiment.checkpoint(
path=".",
step=2,
metrics={"loss": 1.1836304662222222, "accuracy": 0.4333333432674408},
primary_metric=("loss", "minimize"),
)
experiment_list = ExperimentList([experiment])
num_plots = 30
for rep in range(num_plots):
experiment_list.plot()
assert len(plt.get_fignums()) == 1
experiment_list.plot(metric="accuracy")
assert len(plt.get_fignums()) == 2
| 30.2 | 80 | 0.662252 |
3a312a33d9f450e9d41221c8d5dfcf426afe450f | 42,724 | py | Python | tools/build/v2/build/virtual_target.py | jmuskaan72/Boost | 047e36c01841a8cd6a5c74d4e3034da46e327bc1 | [
"BSL-1.0"
]
| 198 | 2015-01-13T05:47:18.000Z | 2022-03-09T04:46:46.000Z | tools/build/v2/build/virtual_target.py | xiaoliang2121/Boost | fc90c3fde129c62565c023f091eddc4a7ed9902b | [
"BSL-1.0"
]
| 4 | 2015-03-19T08:23:23.000Z | 2019-06-24T07:48:47.000Z | tools/build/v2/build/virtual_target.py | xiaoliang2121/Boost | fc90c3fde129c62565c023f091eddc4a7ed9902b | [
"BSL-1.0"
]
| 139 | 2015-01-15T20:09:31.000Z | 2022-01-31T15:21:16.000Z | # Status: ported.
# Base revision: 64488.
#
# Copyright (C) Vladimir Prus 2002. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
# Implements virtual targets, which correspond to actual files created during
# build, but are not yet targets in Jam sense. They are needed, for example,
# when searching for possible transformation sequences, when it is not known
# whether a particular target should be created at all.
#
#
# +--------------------------+
# | VirtualTarget |
# +==========================+
# | actualize |
# +--------------------------+
# | actualize_action() = 0 |
# | actualize_location() = 0 |
# +----------------+---------+
# |
# ^
# / \
# +-+-+
# |
# +---------------------+ +-------+--------------+
# | Action | | AbstractFileTarget |
# +=====================| * +======================+
# | action_name | +--+ action |
# | properties | | +----------------------+
# +---------------------+--+ | actualize_action() |
# | actualize() |0..1 +-----------+----------+
# | path() | |
# | adjust_properties() | sources |
# | actualize_sources() | targets |
# +------+--------------+ ^
# | / \
# ^ +-+-+
# / \ |
# +-+-+ +-------------+-------------+
# | | |
# | +------+---------------+ +--------+-------------+
# | | FileTarget | | SearchedLibTarget |
# | +======================+ +======================+
# | | actualize-location() | | actualize-location() |
# | +----------------------+ +----------------------+
# |
# +-+------------------------------+
# | |
# +----+----------------+ +---------+-----------+
# | CompileAction | | LinkAction |
# +=====================+ +=====================+
# | adjust_properties() | | adjust_properties() |
# +---------------------+ | actualize_sources() |
# +---------------------+
#
# The 'CompileAction' and 'LinkAction' classes are defined not here,
# but in builtin.jam modules. They are shown in the diagram to give
# the big picture.
import bjam
import re
import os.path
import string
import types
from b2.util import path, utility, set
from b2.util.utility import add_grist, get_grist, ungrist, replace_grist, get_value
from b2.util.sequence import unique
from b2.tools import common
from b2.exceptions import *
import b2.build.type
import b2.build.property_set as property_set
import b2.build.property as property
from b2.manager import get_manager
from b2.util import bjam_signature
__re_starts_with_at = re.compile ('^@(.*)')
class VirtualTargetRegistry:
def __init__ (self, manager):
self.manager_ = manager
# A cache for FileTargets
self.files_ = {}
# A cache for targets.
self.cache_ = {}
# A map of actual names to virtual targets.
# Used to make sure we don't associate same
# actual target to two virtual targets.
self.actual_ = {}
self.recent_targets_ = []
# All targets ever registed
self.all_targets_ = []
self.next_id_ = 0
def register (self, target):
""" Registers a new virtual target. Checks if there's already registered target, with the same
name, type, project and subvariant properties, and also with the same sources
and equal action. If such target is found it is retured and 'target' is not registered.
Otherwise, 'target' is registered and returned.
"""
if target.path():
signature = target.path() + "-" + target.name()
else:
signature = "-" + target.name()
result = None
if not self.cache_.has_key (signature):
self.cache_ [signature] = []
for t in self.cache_ [signature]:
a1 = t.action ()
a2 = target.action ()
# TODO: why are we checking for not result?
if not result:
if not a1 and not a2:
result = t
else:
if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
ps1 = a1.properties ()
ps2 = a2.properties ()
p1 = ps1.base () + ps1.free () +\
b2.util.set.difference(ps1.dependency(), ps1.incidental())
p2 = ps2.base () + ps2.free () +\
b2.util.set.difference(ps2.dependency(), ps2.incidental())
if p1 == p2:
result = t
if not result:
self.cache_ [signature].append (target)
result = target
# TODO: Don't append if we found pre-existing target?
self.recent_targets_.append(result)
self.all_targets_.append(result)
return result
def from_file (self, file, file_location, project):
""" Creates a virtual target with appropriate name and type from 'file'.
If a target with that name in that project was already created, returns that already
created target.
TODO: more correct way would be to compute path to the file, based on name and source location
for the project, and use that path to determine if the target was already created.
TODO: passing project with all virtual targets starts to be annoying.
"""
# Check if we've created a target corresponding to this file.
path = os.path.join(os.getcwd(), file_location, file)
path = os.path.normpath(path)
if self.files_.has_key (path):
return self.files_ [path]
file_type = b2.build.type.type (file)
result = FileTarget (file, file_type, project,
None, file_location)
self.files_ [path] = result
return result
def recent_targets(self):
"""Each target returned by 'register' is added to a list of
'recent-target', returned by this function. So, this allows
us to find all targets created when building a given main
target, even if the target."""
return self.recent_targets_
def clear_recent_targets(self):
self.recent_targets_ = []
def all_targets(self):
# Returns all virtual targets ever created
return self.all_targets_
# Returns all targets from 'targets' with types
# equal to 'type' or derived from it.
def select_by_type(self, type, targets):
        return [t for t in targets if b2.build.type.is_derived(t.type(), type)]
def register_actual_name (self, actual_name, virtual_target):
if self.actual_.has_key (actual_name):
cs1 = self.actual_ [actual_name].creating_subvariant ()
cs2 = virtual_target.creating_subvariant ()
cmt1 = cs1.main_target ()
cmt2 = cs2.main_target ()
action1 = self.actual_ [actual_name].action ()
action2 = virtual_target.action ()
properties_added = []
properties_removed = []
if action1 and action2:
p1 = action1.properties ()
p1 = p1.raw ()
p2 = action2.properties ()
p2 = p2.raw ()
properties_removed = set.difference (p1, p2)
if not properties_removed: properties_removed = "none"
properties_added = set.difference (p2, p1)
if not properties_added: properties_added = "none"
# FIXME: Revive printing of real location.
get_manager().errors()(
"Duplicate name of actual target: '%s'\n"
"previous virtual target '%s'\n"
"created from '%s'\n"
"another virtual target '%s'\n"
"created from '%s'\n"
"added properties: '%s'\n"
"removed properties: '%s'\n"
% (actual_name,
self.actual_ [actual_name], "loc", #cmt1.location (),
virtual_target,
"loc", #cmt2.location (),
properties_added, properties_removed))
else:
self.actual_ [actual_name] = virtual_target
def add_suffix (self, specified_name, file_type, prop_set):
""" Appends the suffix appropriate to 'type/property_set' combination
to the specified name and returns the result.
"""
suffix = b2.build.type.generated_target_suffix (file_type, prop_set)
if suffix:
return specified_name + '.' + suffix
else:
return specified_name
class VirtualTarget:
""" Potential target. It can be converted into jam target and used in
building, if needed. However, it can be also dropped, which allows
to search for different transformation and select only one.
name: name of this target.
project: project to which this target belongs.
"""
def __init__ (self, name, project):
self.name_ = name
self.project_ = project
self.dependencies_ = []
self.always_ = False
        # Caches whether dependencies for scanners have already been set.
self.made_ = {}
def manager(self):
return self.project_.manager()
def virtual_targets(self):
return self.manager().virtual_targets()
def name (self):
""" Name of this target.
"""
return self.name_
def project (self):
""" Project of this target.
"""
return self.project_
def depends (self, d):
""" Adds additional instances of 'VirtualTarget' that this
one depends on.
"""
self.dependencies_ = unique (self.dependencies_ + d).sort ()
def dependencies (self):
return self.dependencies_
def always(self):
self.always_ = True
def actualize (self, scanner = None):
""" Generates all the actual targets and sets up build actions for
this target.
If 'scanner' is specified, creates an additional target
with the same location as actual target, which will depend on the
actual target and be associated with 'scanner'. That additional
target is returned. See the docs (#dependency_scanning) for rationale.
Target must correspond to a file if 'scanner' is specified.
If scanner is not specified, then actual target is returned.
"""
actual_name = self.actualize_no_scanner ()
if self.always_:
bjam.call("ALWAYS", actual_name)
if not scanner:
return actual_name
else:
# Add the scanner instance to the grist for name.
g = '-'.join ([ungrist(get_grist(actual_name)), str(id(scanner))])
name = replace_grist (actual_name, '<' + g + '>')
if not self.made_.has_key (name):
self.made_ [name] = True
self.project_.manager ().engine ().add_dependency (name, actual_name)
self.actualize_location (name)
self.project_.manager ().scanners ().install (scanner, name, str (self))
return name
# private: (overridables)
def actualize_action (self, target):
""" Sets up build actions for 'target'. Should call appropriate rules
and set target variables.
"""
raise BaseException ("method should be defined in derived classes")
def actualize_location (self, target):
""" Sets up variables on 'target' which specify its location.
"""
raise BaseException ("method should be defined in derived classes")
def path (self):
""" If the target is generated one, returns the path where it will be
generated. Otherwise, returns empty list.
"""
raise BaseException ("method should be defined in derived classes")
def actual_name (self):
""" Return that actual target name that should be used
(for the case where no scanner is involved)
"""
raise BaseException ("method should be defined in derived classes")
class AbstractFileTarget (VirtualTarget):
""" Target which correspond to a file. The exact mapping for file
is not yet specified in this class. (TODO: Actually, the class name
could be better...)
May be a source file (when no action is specified), or
derived file (otherwise).
The target's grist is concatenation of project's location,
properties of action (for derived files), and, optionally,
value identifying the main target.
exact: If non-empty, the name is exactly the name
created file should have. Otherwise, the '__init__'
method will add suffix obtained from 'type' by
calling 'type.generated-target-suffix'.
type: optional type of this target.
"""
def __init__ (self, name, type, project, action = None, exact=False):
VirtualTarget.__init__ (self, name, project)
self.type_ = type
self.action_ = action
self.exact_ = exact
if action:
action.add_targets ([self])
if self.type and not exact:
self.__adjust_name (name)
self.actual_name_ = None
self.path_ = None
self.intermediate_ = False
self.creating_subvariant_ = None
# True if this is a root target.
self.root_ = False
def type (self):
return self.type_
def set_path (self, path):
""" Sets the path. When generating target name, it will override any path
computation from properties.
"""
self.path_ = path
def action (self):
""" Returns the action.
"""
return self.action_
def root (self, set = None):
""" Sets/gets the 'root' flag. Target is root is it directly correspods to some
variant of a main target.
"""
if set:
self.root_ = True
return self.root_
def creating_subvariant (self, s = None):
""" Gets or sets the subvariant which created this target. Subvariant
            is set when the target is brought into existence and is never changed
            after that. In particular, if a target is shared by several subvariants, only
            the first is stored.
            s: If specified, specifies the value to set,
which should be instance of 'subvariant' class.
"""
if s and not self.creating_subvariant ():
if self.creating_subvariant ():
raise BaseException ("Attempt to change 'dg'")
else:
self.creating_subvariant_ = s
return self.creating_subvariant_
def actualize_action (self, target):
if self.action_:
self.action_.actualize ()
# Return a human-readable representation of this target
#
# If this target has an action, that's:
#
# { <action-name>-<self.name>.<self.type> <action-sources>... }
#
# otherwise, it's:
#
# { <self.name>.<self.type> }
#
def str(self):
a = self.action()
name_dot_type = self.name_ + "." + self.type_
if a:
action_name = a.action_name()
ss = [ s.str() for s in a.sources()]
return "{ %s-%s %s}" % (action_name, name_dot_type, str(ss))
else:
return "{ " + name_dot_type + " }"
# private:
def actual_name (self):
if not self.actual_name_:
self.actual_name_ = '<' + self.grist() + '>' + self.name_
return self.actual_name_
def grist (self):
"""Helper to 'actual_name', above. Compute unique prefix used to distinguish
this target from other targets with the same name which create different
file.
"""
# Depending on target, there may be different approaches to generating
# unique prefixes. We'll generate prefixes in the form
# <one letter approach code> <the actual prefix>
path = self.path ()
if path:
# The target will be generated to a known path. Just use the path
# for identification, since path is as unique as it can get.
return 'p' + path
else:
# File is either source, which will be searched for, or is not a file at
# all. Use the location of project for distinguishing.
project_location = self.project_.get ('location')
path_components = b2.util.path.split(project_location)
location_grist = '!'.join (path_components)
if self.action_:
ps = self.action_.properties ()
property_grist = ps.as_path ()
# 'property_grist' can be empty when 'ps' is an empty
# property set.
if property_grist:
location_grist = location_grist + '/' + property_grist
return 'l' + location_grist
def __adjust_name(self, specified_name):
"""Given the target name specified in constructor, returns the
name which should be really used, by looking at the <tag> properties.
            The tag properties come in two flavours:
- <tag>value,
- <tag>@rule-name
In the first case, value is just added to name
In the second case, the specified rule is called with specified name,
target type and properties and should return the new name.
            If no <tag> property is specified, or the rule specified by
<tag> returns nothing, returns the result of calling
virtual-target.add-suffix"""
if self.action_:
ps = self.action_.properties()
else:
ps = property_set.empty()
# FIXME: I'm not sure how this is used, need to check with
# Rene to figure out how to implement
#~ We add ourselves to the properties so that any tag rule can get
#~ more direct information about the target than just that available
#~ through the properties. This is useful in implementing
#~ name changes based on the sources of the target. For example to
#~ make unique names of object files based on the source file.
#~ --grafik
#ps = property_set.create(ps.raw() + ["<target>%s" % "XXXX"])
#ps = [ property-set.create [ $(ps).raw ] <target>$(__name__) ] ;
tag = ps.get("<tag>")
if tag:
if len(tag) > 1:
get_manager().errors()(
"""<tag>@rulename is present but is not the only <tag> feature""")
tag = tag[0]
if callable(tag):
self.name_ = tag(specified_name, self.type_, ps)
else:
if not tag[0] == '@':
                    self.manager().errors()("""The value of the <tag> feature must be '@rule-name'""")
exported_ps = b2.util.value_to_jam(ps, methods=True)
self.name_ = b2.util.call_jam_function(
tag[1:], specified_name, self.type_, exported_ps)
if self.name_:
self.name_ = self.name_[0]
# If there's no tag or the tag rule returned nothing.
if not tag or not self.name_:
self.name_ = add_prefix_and_suffix(specified_name, self.type_, ps)
def actualize_no_scanner(self):
name = self.actual_name()
# Do anything only on the first invocation
if not self.made_:
self.made_[name] = True
if self.action_:
# For non-derived target, we don't care if there
# are several virtual targets that refer to the same name.
# One case when this is unavoidable is when file name is
# main.cpp and two targets have types CPP (for compiling)
                # and MOCCABLE_CPP (for conversion to H via Qt tools).
self.virtual_targets().register_actual_name(name, self)
for i in self.dependencies_:
                self.manager().engine().add_dependency(name, i.actualize())
self.actualize_location(name)
self.actualize_action(name)
return name
@bjam_signature((["specified_name"], ["type"], ["property_set"]))
def add_prefix_and_suffix(specified_name, type, property_set):
"""Appends the suffix appropriate to 'type/property-set' combination
to the specified name and returns the result."""
property_set = b2.util.jam_to_value_maybe(property_set)
suffix = ""
if type:
suffix = b2.build.type.generated_target_suffix(type, property_set)
# Handle suffixes for which no leading dot is desired. Those are
# specified by enclosing them in <...>. Needed by python so it
# can create "_d.so" extensions, for example.
if get_grist(suffix):
suffix = ungrist(suffix)
elif suffix:
suffix = "." + suffix
prefix = ""
if type:
prefix = b2.build.type.generated_target_prefix(type, property_set)
if specified_name.startswith(prefix):
prefix = ""
if not prefix:
prefix = ""
if not suffix:
suffix = ""
return prefix + specified_name + suffix
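# Illustrative behaviour of add_prefix_and_suffix (assumptions about typical
# type definitions, not asserted by this module): if the OBJ type registers
# suffix "o" and a static-library type registers prefix "lib" and suffix "a":
#   add_prefix_and_suffix("hello", "OBJ", ps)         # -> "hello.o"
#   add_prefix_and_suffix("hello", "STATIC_LIB", ps)  # -> "libhello.a"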
class FileTarget (AbstractFileTarget):
""" File target with explicitly known location.
The file path is determined as
- value passed to the 'set_path' method, if any
- for derived files, project's build dir, joined with components
that describe action's properties. If the free properties
are not equal to the project's reference properties
an element with name of main target is added.
- for source files, project's source dir
The file suffix is
- the value passed to the 'suffix' method, if any, or
- the suffix which correspond to the target's type.
"""
def __init__ (self, name, type, project, action = None, path=None, exact=False):
AbstractFileTarget.__init__ (self, name, type, project, action, exact)
self.path_ = path
def __str__(self):
if self.type_:
return self.name_ + "." + self.type_
else:
return self.name_
def clone_with_different_type(self, new_type):
return FileTarget(self.name_, new_type, self.project_,
self.action_, self.path_, exact=True)
def actualize_location (self, target):
engine = self.project_.manager_.engine ()
if self.action_:
# This is a derived file.
path = self.path ()
engine.set_target_variable (target, 'LOCATE', path)
# Make sure the path exists.
engine.add_dependency (target, path)
common.mkdir(engine, path)
# It's possible that the target name includes a directory
# too, for example when installing headers. Create that
# directory.
d = os.path.dirname(get_value(target))
if d:
d = os.path.join(path, d)
engine.add_dependency(target, d)
common.mkdir(engine, d)
# For real file target, we create a fake target that
# depends on the real target. This allows to run
#
# bjam hello.o
#
# without trying to guess the name of the real target.
# Note the that target has no directory name, and a special
# grist <e>.
#
# First, that means that "bjam hello.o" will build all
# known hello.o targets.
# Second, the <e> grist makes sure this target won't be confused
# with other targets, for example, if we have subdir 'test'
# with target 'test' in it that includes 'test.o' file,
# then the target for directory will be just 'test' the target
# for test.o will be <ptest/bin/gcc/debug>test.o and the target
# we create below will be <e>test.o
engine.add_dependency("<e>%s" % get_value(target), target)
# Allow bjam <path-to-file>/<file> to work. This won't catch all
# possible ways to refer to the path (relative/absolute, extra ".",
# various "..", but should help in obvious cases.
engine.add_dependency("<e>%s" % (os.path.join(path, get_value(target))), target)
else:
# This is a source file.
engine.set_target_variable (target, 'SEARCH', self.project_.get ('source-location'))
def path (self):
""" Returns the directory for this target.
"""
if not self.path_:
if self.action_:
p = self.action_.properties ()
(target_path, relative_to_build_dir) = p.target_path ()
if relative_to_build_dir:
# Indicates that the path is relative to
# build dir.
target_path = os.path.join (self.project_.build_dir (), target_path)
# Store the computed path, so that it's not recomputed
# any more
self.path_ = target_path
return self.path_
class NotFileTarget(AbstractFileTarget):
def __init__(self, name, project, action):
AbstractFileTarget.__init__(self, name, None, project, action)
def path(self):
"""Returns nothing, to indicate that target path is not known."""
return None
def actualize_location(self, target):
bjam.call("NOTFILE", target)
bjam.call("ALWAYS", target)
bjam.call("NOUPDATE", target)
class Action:
""" Class which represents an action.
Both 'targets' and 'sources' should list instances of 'VirtualTarget'.
Action name should name a rule with this prototype
rule action_name ( targets + : sources * : properties * )
Targets and sources are passed as actual jam targets. The rule may
not establish dependency relationship, but should do everything else.
"""
def __init__ (self, manager, sources, action_name, prop_set):
assert(isinstance(prop_set, property_set.PropertySet))
assert type(sources) == types.ListType
self.sources_ = sources
self.action_name_ = action_name
if not prop_set:
prop_set = property_set.empty()
self.properties_ = prop_set
if not all(isinstance(v, VirtualTarget) for v in prop_set.get('implicit-dependency')):
import pdb
pdb.set_trace()
self.manager_ = manager
self.engine_ = self.manager_.engine ()
self.targets_ = []
# Indicates whether this has been actualized or not.
self.actualized_ = False
self.dependency_only_sources_ = []
self.actual_sources_ = []
def add_targets (self, targets):
self.targets_ += targets
    def replace_targets (self, old_targets, new_targets):
        self.targets_ = [t for t in self.targets_ if not t in old_targets] + new_targets
def targets (self):
return self.targets_
def sources (self):
return self.sources_
def action_name (self):
return self.action_name_
def properties (self):
return self.properties_
def actualize (self):
""" Generates actual build instructions.
"""
if self.actualized_:
return
self.actualized_ = True
ps = self.properties ()
properties = self.adjust_properties (ps)
actual_targets = []
for i in self.targets ():
actual_targets.append (i.actualize ())
self.actualize_sources (self.sources (), properties)
self.engine_.add_dependency (actual_targets, self.actual_sources_ + self.dependency_only_sources_)
# This works around a bug with -j and actions that
        # produce multiple targets, where:
# - dependency on the first output is found, and
# the action is started
# - dependency on the second output is found, and
# bjam noticed that command is already running
# - instead of waiting for the command, dependents
# of the second targets are immediately updated.
if len(actual_targets) > 1:
bjam.call("INCLUDES", actual_targets, actual_targets)
# FIXME: check the comment below. Was self.action_name_ [1]
# Action name can include additional argument to rule, which should not
# be passed to 'set-target-variables'
# FIXME: breaking circular dependency
import toolset
toolset.set_target_variables (self.manager_, self.action_name_, actual_targets, properties)
engine = self.manager_.engine ()
# FIXME: this is supposed to help --out-xml option, but we don't
# implement that now, and anyway, we should handle it in Python,
# not but putting variables on bjam-level targets.
bjam.call("set-target-variable", actual_targets, ".action", repr(self))
self.manager_.engine ().set_update_action (self.action_name_, actual_targets, self.actual_sources_,
properties)
# Since we set up creating action here, we also set up
# action for cleaning up
self.manager_.engine ().set_update_action ('common.Clean', 'clean-all',
actual_targets)
return actual_targets
def actualize_source_type (self, sources, prop_set):
""" Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets.
"""
result = []
for i in sources:
scanner = None
# FIXME: what's this?
# if isinstance (i, str):
# i = self.manager_.get_object (i)
if i.type ():
scanner = b2.build.type.get_scanner (i.type (), prop_set)
r = i.actualize (scanner)
result.append (r)
return result
def actualize_sources (self, sources, prop_set):
""" Creates actual jam targets for sources. Initializes two member
variables:
'self.actual_sources_' -- sources which are passed to updating action
'self.dependency_only_sources_' -- sources which are made dependencies, but
are not used otherwise.
New values will be *appended* to the variables. They may be non-empty,
if caller wants it.
"""
dependencies = self.properties_.get ('<dependency>')
self.dependency_only_sources_ += self.actualize_source_type (dependencies, prop_set)
self.actual_sources_ += self.actualize_source_type (sources, prop_set)
# This is used to help bjam find dependencies in generated headers
# in other main targets.
# Say:
#
# make a.h : ....... ;
# exe hello : hello.cpp : <implicit-dependency>a.h ;
#
# However, for bjam to find the dependency the generated target must
# be actualized (i.e. have the jam target). In the above case,
# if we're building just hello ("bjam hello"), 'a.h' won't be
# actualized unless we do it here.
implicit = self.properties_.get("<implicit-dependency>")
for i in implicit:
i.actualize()
def adjust_properties (self, prop_set):
""" Determines real properties when trying building with 'properties'.
This is last chance to fix properties, for example to adjust includes
to get generated headers correctly. Default implementation returns
its argument.
"""
return prop_set
class NullAction (Action):
""" Action class which does nothing --- it produces the targets with
specific properties out of nowhere. It's needed to distinguish virtual
targets with different properties that are known to exist, and have no
actions which create them.
"""
def __init__ (self, manager, prop_set):
Action.__init__ (self, manager, [], None, prop_set)
def actualize (self):
if not self.actualized_:
self.actualized_ = True
for i in self.targets ():
i.actualize ()
class NonScanningAction(Action):
"""Class which acts exactly like 'action', except that the sources
are not scanned for dependencies."""
def __init__(self, sources, action_name, property_set):
#FIXME: should the manager parameter of Action.__init__
#be removed? -- Steven Watanabe
Action.__init__(self, b2.manager.get_manager(), sources, action_name, property_set)
def actualize_source_type(self, sources, property_set):
result = []
for s in sources:
result.append(s.actualize())
return result
def traverse (target, include_roots = False, include_sources = False):
""" Traverses the dependency graph of 'target' and return all targets that will
be created before this one is created. If root of some dependency graph is
found during traversal, it's either included or not, dependencing of the
value of 'include_roots'. In either case, sources of root are not traversed.
"""
result = []
if target.action ():
action = target.action ()
# This includes 'target' as well
result += action.targets ()
for t in action.sources ():
# FIXME:
# TODO: see comment in Manager.register_object ()
#if not isinstance (t, VirtualTarget):
# t = target.project_.manager_.get_object (t)
if not t.root ():
result += traverse (t, include_roots, include_sources)
elif include_roots:
result.append (t)
elif include_sources:
result.append (target)
return result
def clone_action (action, new_project, new_action_name, new_properties):
"""Takes an 'action' instances and creates new instance of it
and all produced target. The rule-name and properties are set
to 'new-rule-name' and 'new-properties', if those are specified.
Returns the cloned action."""
if not new_action_name:
new_action_name = action.action_name()
if not new_properties:
new_properties = action.properties()
cloned_action = action.__class__(action.manager_, action.sources(), new_action_name,
new_properties)
cloned_targets = []
for target in action.targets():
n = target.name()
# Don't modify the name of the produced targets. Strip the directory f
cloned_target = FileTarget(n, target.type(), new_project,
cloned_action, exact=True)
d = target.dependencies()
if d:
cloned_target.depends(d)
cloned_target.root(target.root())
cloned_target.creating_subvariant(target.creating_subvariant())
cloned_targets.append(cloned_target)
return cloned_action
class Subvariant:
def __init__ (self, main_target, prop_set, sources, build_properties, sources_usage_requirements, created_targets):
"""
main_target: The instance of MainTarget class
prop_set: Properties requested for this target
sources:
build_properties: Actually used properties
sources_usage_requirements: Properties propagated from sources
created_targets: Top-level created targets
"""
self.main_target_ = main_target
self.properties_ = prop_set
self.sources_ = sources
self.build_properties_ = build_properties
self.sources_usage_requirements_ = sources_usage_requirements
self.created_targets_ = created_targets
self.usage_requirements_ = None
# Pre-compose the list of other dependency graphs, on which this one
# depends
deps = build_properties.get('<implicit-dependency>')
self.other_dg_ = []
for d in deps:
self.other_dg_.append(d.creating_subvariant ())
self.other_dg_ = unique (self.other_dg_)
self.implicit_includes_cache_ = {}
self.target_directories_ = None
def main_target (self):
return self.main_target_
def created_targets (self):
return self.created_targets_
def requested_properties (self):
return self.properties_
def build_properties (self):
return self.build_properties_
def sources_usage_requirements (self):
return self.sources_usage_requirements_
def set_usage_requirements (self, usage_requirements):
self.usage_requirements_ = usage_requirements
def usage_requirements (self):
return self.usage_requirements_
def all_referenced_targets(self, result):
"""Returns all targets referenced by this subvariant,
either directly or indirectly, and either as sources,
        or as dependency properties. Targets referred to with a
        dependency property are returned as properties, not targets."""
# Find directly referenced targets.
deps = self.build_properties().dependency()
all_targets = self.sources_ + deps
# Find other subvariants.
r = []
for e in all_targets:
if not e in result:
result.add(e)
if isinstance(e, property.Property):
t = e.value()
else:
t = e
# FIXME: how can this be?
cs = t.creating_subvariant()
if cs:
r.append(cs)
r = unique(r)
for s in r:
if s != self:
s.all_referenced_targets(result)
def implicit_includes (self, feature, target_type):
""" Returns the properties which specify implicit include paths to
generated headers. This traverses all targets in this subvariant,
            and subvariants referred to by <implicit-dependency> properties.
For all targets which are of type 'target-type' (or for all targets,
if 'target_type' is not specified), the result will contain
<$(feature)>path-to-that-target.
"""
if not target_type:
key = feature
else:
key = feature + "-" + target_type
result = self.implicit_includes_cache_.get(key)
if not result:
target_paths = self.all_target_directories(target_type)
target_paths = unique(target_paths)
result = ["<%s>%s" % (feature, p) for p in target_paths]
self.implicit_includes_cache_[key] = result
return result
def all_target_directories(self, target_type = None):
# TODO: does not appear to use target_type in deciding
# if we've computed this already.
if not self.target_directories_:
self.target_directories_ = self.compute_target_directories(target_type)
return self.target_directories_
def compute_target_directories(self, target_type=None):
result = []
for t in self.created_targets():
if not target_type or b2.build.type.is_derived(t.type(), target_type):
result.append(t.path())
for d in self.other_dg_:
result.extend(d.all_target_directories(target_type))
result = unique(result)
return result
| 38.180518 | 120 | 0.553085 |
856b1a37d65971b47afece31886ae288d771c654 | 16,824 | py | Python | train_results_individualslices.py | aikonbrasil/RL_on_Slicing | fe6cdb064826480172fd2bef35423f1339dd9af7 | [
"MIT"
]
| null | null | null | train_results_individualslices.py | aikonbrasil/RL_on_Slicing | fe6cdb064826480172fd2bef35423f1339dd9af7 | [
"MIT"
]
| null | null | null | train_results_individualslices.py | aikonbrasil/RL_on_Slicing | fe6cdb064826480172fd2bef35423f1339dd9af7 | [
"MIT"
]
| 1 | 2021-08-07T10:49:36.000Z | 2021-08-07T10:49:36.000Z | # -*- coding: utf-8 -*-
"""
@author: anonymous
"""
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
import json
import matplotlib
#matplotlib.use('Qt5Agg')
import argparse
import csv
import scipy.io as extmatlab
def main(scenario):
json_file = scenario['json_file']
json_file_policy = scenario['json_file_policy']
json_file_CS = scenario['json_file_CS']
json_file_policy2 = scenario['json_file_policy2']
json_file_CS2 = scenario['json_file_CS2']
num_sim = scenario['num_sim']
with open ('./config/deployment/'+json_file+'.json','r') as f:
options = json.load(f)
    ## Number of samples
total_samples = options['simulation']['total_samples']
K = options['simulation']['K']
N = options['simulation']['N']
M = options['simulation']['M']
# PFS set to true means that we save log average sum-rate instead of sum-rate
pfs = False
    if 'pfs' in options['simulation']:
pfs = options['simulation']['pfs']
beta = 0.01
if num_sim == -1:
num_simulations = options['simulation']['num_simulations']
simulation = options['simulation']['simulation_index_start']
else:
num_simulations = 1
simulation = num_sim
# simulation parameters
mobility_params = options['mobility_params']
mobility_params['alpha_angle'] = options['mobility_params']['alpha_angle_rad'] * np.pi #radian/sec
history = 250
mean_p_FP = np.zeros(total_samples)
mean_time_FP = np.zeros(total_samples)
mean_iterations_FP = np.zeros(total_samples)
mean_sum_rate_FP = np.zeros(total_samples)
mean_sum_rate_FPMulti_delayedbyone = np.zeros(total_samples)
mean_sum_rate_randomCS_idealFP = np.zeros(total_samples)
mean_sum_rate_randomCS_randomP = np.zeros(total_samples)
mean_sum_rate_policy_train_innersims = np.zeros(total_samples)
mean_p_strategy_all_train_innersims = np.zeros(total_samples)
mean_time_optimization_at_each_slot_takes = []
mean_time_calculating_strategy_takes = []
mean_sum_rate_policy_train_innersims2 = np.zeros(total_samples)
mean_p_strategy_all_train_innersims2 = np.zeros(total_samples)
#Custom add Slicing
mean_interference_policy_train_sims2 = np.zeros(total_samples)
mean_time_optimization_at_each_slot_takes2 = []
mean_time_calculating_strategy_takes2 = []
mean_sum_rate_policy_train_indivslices = np.zeros((total_samples, 1, M))
for overal_sims in range(simulation,simulation+num_simulations):
# Get the benchmarks.
file_path = './simulations/sumrate/benchmarks/%s_network%d'%(json_file,overal_sims)
data = np.load(file_path+'.npz')
p_FP = data['arr_0']
alpha_FP = data['arr_1']
time_stats_FP = data['arr_2']
sum_rate_FP = data['arr_3']
sum_rate_FPMulti_delayedbyone= data['arr_4']
sum_rate_randomCS_idealFP = data['arr_5']
sum_rate_randomCS_randomP = data['arr_5']
file_path = './simulations/sumrate/train/%s_%s_%s_network%d.ckpt'%(json_file,json_file_policy,json_file_CS,overal_sims)
data = np.load(file_path+'.npz')
# Get the train policy results
sum_rate_policy_train = data['arr_2']
p_strategy_all = data['arr_3']
alpha_strategy_all = data['arr_4']
time_optimization_at_each_slot_takes = data['arr_5']
time_calculating_strategy_takes = data['arr_6']
# Average
mean_p_FP = mean_p_FP + np.sum(p_FP,1)/float(num_simulations)
mean_time_FP = mean_time_FP + time_stats_FP[:,0]/float(num_simulations)
mean_iterations_FP = mean_iterations_FP + time_stats_FP[:,1]/float(num_simulations)
mean_sum_rate_FP = mean_sum_rate_FP + sum_rate_FP/float(num_simulations)
mean_sum_rate_FPMulti_delayedbyone = mean_sum_rate_FPMulti_delayedbyone + sum_rate_FPMulti_delayedbyone/float(num_simulations)
mean_sum_rate_randomCS_idealFP = mean_sum_rate_randomCS_idealFP + sum_rate_randomCS_idealFP/float(num_simulations)
mean_sum_rate_randomCS_randomP = mean_sum_rate_randomCS_randomP + sum_rate_randomCS_randomP/float(num_simulations)
mean_sum_rate_policy_train_innersims = mean_sum_rate_policy_train_innersims + sum_rate_policy_train/float(num_simulations)
mean_p_strategy_all_train_innersims = mean_p_strategy_all_train_innersims + np.sum(p_strategy_all,1)/float(num_simulations)
mean_time_optimization_at_each_slot_takes.append(time_optimization_at_each_slot_takes)
mean_time_calculating_strategy_takes.append(time_calculating_strategy_takes)
print('reading file with potential error path...')
print(json_file_CS2)
print(json_file_policy2)
# file_path = './simulations/sumrate/train/%s_%s_%s_network%d.ckpt'%(json_file,json_file_policy2,json_file_CS2,overal_sims)
# data = np.load(file_path+'.npz')
# Get the train policy results
sum_rate_policy_train2 = data['arr_2']
p_strategy_all2 = data['arr_3']
alpha_strategy_all2 = data['arr_4']
time_optimization_at_each_slot_takes2 = data['arr_5']
time_calculating_strategy_takes2 = data['arr_6']
interference_info = data['arr_7']
individual_slices = data['arr_8']
mean_sum_rate_policy_train_innersims2 = mean_sum_rate_policy_train_innersims2 + sum_rate_policy_train2/float(num_simulations)
mean_p_strategy_all_train_innersims2 = mean_p_strategy_all_train_innersims2 + np.sum(p_strategy_all2,1)/float(num_simulations)
#CUSTOM ADD Slicing
mean_interference_policy_train_sims2 = mean_interference_policy_train_sims2 + interference_info/float(num_simulations)
mean_sum_rate_policy_train_indivslices = mean_sum_rate_policy_train_indivslices + individual_slices / float(num_simulations)
print(type(mean_sum_rate_policy_train_indivslices))
mean_time_optimization_at_each_slot_takes2.append(time_optimization_at_each_slot_takes2)
mean_time_calculating_strategy_takes2.append(time_calculating_strategy_takes2)
if pfs:
bw = 1e7
add_bw = np.log(bw)
mean_sum_rate_FP = add_bw + mean_sum_rate_FP
mean_sum_rate_FPMulti_delayedbyone = add_bw + mean_sum_rate_FPMulti_delayedbyone
mean_sum_rate_randomCS_idealFP = add_bw + mean_sum_rate_randomCS_idealFP
mean_sum_rate_randomCS_randomP = add_bw + mean_sum_rate_randomCS_randomP
mean_sum_rate_policy_train_innersims = add_bw + mean_sum_rate_policy_train_innersims
avg_result_over = 1
else:
avg_result_over = float(N)
#print('K '+ str(int(N))+' R '+str(R_defined)+ ' r '+str(min_dist) + ' '+file_path[14:18])
#print('Test Sum rate optimal ' + str(np.mean(mean_sum_rate[total_samples-2500:]/N)))
#print('Test Sum rate delayed ' + str(np.mean(mean_sum_rate_FPMulti_delayedbyone[total_samples-2500:]/N)))
#print('Test Sum rate random ' + str(np.mean(mean_sum_rate_randomCS_idealFP[total_samples-2500:]/N)))
#print('Test Sum rate max ' + str(np.mean(mean_sum_rate_randomCS_randomP[total_samples-2500:]/N)))
#for i in range(len(power_multiplier_allsims)):
# print('Multiplier '+str(power_multiplier_allsims[i])+
# ' Test Sum rate ' +str(np.mean(mean_sum_rate_policy_train_innersims[i,total_samples-2500:]/N)))
lines = ["-","--",':','-.',':','-.']
linecycler = cycle(lines)
history = 100
fig = plt.figure()
t=np.arange(0,total_samples,10)
sum_rate_performance_FP = []
sum_rate_performance_random = []
sum_rate_performance_max = []
sum_rate_performance_FPMulti_delayedbyone = []
sum_rate_performance_policy = []
sum_rate_performance_wmmse = []
sum_rate_performance_policy = []
sum_rate_performance_policy2 = []
#Custom Add Slicing
interference_performance_policy2 = []
interference_performance_policy2_db = []
individualslices_performance_policy2 = []
ep_start = 0
for i in range(len(t)):
if t[i] % options['train_episodes']['T_train'] == 0:
ep_start = t[i]
sum_rate_performance_FP.append(np.mean(mean_sum_rate_FP[max(ep_start,t[i]-history):t[i]]))
sum_rate_performance_random.append(np.mean(mean_sum_rate_randomCS_idealFP[max(ep_start,t[i]-history):t[i]]))
sum_rate_performance_max.append(np.mean(mean_sum_rate_randomCS_randomP[max(ep_start,t[i]-history):t[i]]))
sum_rate_performance_FPMulti_delayedbyone.append(np.mean(mean_sum_rate_FPMulti_delayedbyone[max(ep_start,t[i]-history):t[i]]))
sum_rate_performance_policy.append(np.mean(mean_sum_rate_policy_train_innersims[max(ep_start,t[i]-history):t[i]]))
sum_rate_performance_policy2.append(np.mean(mean_sum_rate_policy_train_innersims2[max(ep_start,t[i]-history):t[i]]))
#CUSTOM ADD Slicing
interference_performance_policy2.append(
np.mean(mean_interference_policy_train_sims2[max(ep_start, t[i] - history):t[i]]))
info = np.mean(mean_interference_policy_train_sims2[max(ep_start, t[i] - history):t[i]])
infodb = 10 * np.log(1000 * info)
interference_performance_policy2_db.append(infodb)
info_individ_slices = mean_sum_rate_policy_train_indivslices[:,0,::]
print(type(info_individ_slices))
#print(t[i])
print(max(ep_start, t[i] - history))
print(t[i])
info_individ_slices_other = mean_sum_rate_policy_train_indivslices[max(ep_start, t[i] - history):t[i],0,:]
mean_info_indiv_slices_other = np.mean(info_individ_slices_other, axis=0)
list1 = mean_info_indiv_slices_other.tolist()
# np.reshape(mean_info_indiv_slices_other, (M,1))
individualslices_performance_policy2.append((list1))
# sum_rate_performance_policy2.append(
# np.mean(mean_sum_rate_policy_train_innersims2[max(ep_start, t[i] - history):t[i]]))
# interference_performance_policy2_db.append(10 * np.log10(1000 * np.mean(mean_interference_policy_train_sims2[max(ep_start, t[i] - history):t[i]]))))
#plt.figure(figsize=(5,5))
print(type(individualslices_performance_policy2))
t=np.arange(0,total_samples,10)
#plt.plot(t, np.array(sum_rate_performance_policy)/avg_result_over, label='proposed',linestyle=next(linecycler))# with Multiplier '+str(power_multiplier_allsims[i]),linestyle=next(linecycler))
#plt.plot(t, np.array(sum_rate_performance_policy2)/avg_result_over, label='joint learning',linestyle=next(linecycler))# with Multiplier '+str(power_multiplier_allsims[i]),linestyle=next(linecycler))
#plt.plot(t, np.array(sum_rate_performance_FP)/avg_result_over, label='ideal FP',linestyle=next(linecycler))
#plt.plot(t, np.array(sum_rate_performance_FPMulti_delayedbyone)/avg_result_over, label='delayed FP',linestyle=next(linecycler))
# plt.plot(t, np.array(sum_rate_performance_random)/avg_result_over, label='random',linestyle=next(linecycler))
# plt.plot(t, np.array(sum_rate_performance_max)/avg_result_over,'c', label='random CS random P',linestyle=next(linecycler))
# plt.plot(t, np.array(sum_rate_performance_random)/avg_result_over, linestyle=next(linecycler))
#CUSTOM ADD Slicing
plt.plot(np.array(individualslices_performance_policy2)/avg_result_over , label='Spectral Efficiency per slice', linestyle=next(linecycler)) #
plt.xlabel('training iterations')
if not pfs:
plt.ylabel('average system interference (bps/Hz) per link')
else:
plt.ylabel('sum log average rate (ln(bps))')
plt.grid(True)
plt.legend(loc=4)
plt.tight_layout()
plt.savefig('./fig/se_slices_individual%s_network_%d'%(json_file,overal_sims)+'.pdf', format='pdf', dpi=1000)
plt.savefig('./fig/se_slices_individual%s_network_%d'%(json_file,overal_sims)+'.png', format='png', dpi=1000)
plt.show(block=False)
# Average performance of the last 200 training slots.
history = 200
print('Deployment: %s; policy: %s; K: %d; N: %d'%(json_file,json_file_policy,N,K))
print('Averages for last %d episodes:'%(history))
if not pfs:
res_label = 'Sum rate per link'
else:
res_label = 'Sum log average rate'
print('%s - proposed: %.2f'%(res_label,np.mean(mean_sum_rate_policy_train_innersims[total_samples-history:])/avg_result_over))
print('%s - joint learning: %.2f'%(res_label,np.mean(mean_sum_rate_policy_train_innersims2[total_samples-history:])/avg_result_over))
print('%s - FP: %.2f'%(res_label,np.mean(mean_sum_rate_FP[total_samples-history:])/avg_result_over))
print('%s - FP Multi delayed: %.2f'%(res_label,np.mean(mean_sum_rate_FPMulti_delayedbyone[total_samples-history:])/avg_result_over))
print('%s - random: %.2f'%(res_label,np.mean(mean_sum_rate_randomCS_idealFP[total_samples-history:])/avg_result_over))
print('%s - full: %.2f'%(res_label,np.mean(mean_sum_rate_randomCS_randomP[total_samples-history:])/avg_result_over))
# Average time statistics
# print('Average time for an FP run: %.2f ms'%(1000 * np.mean(mean_time_FP)))
# print('Average time for a policy agent to determine its action %.2f ms'%(1000 * np.mean(mean_time_calculating_strategy_takes)))
# print('Average time for a policy mini-batch train %.2f ms'%(1000 * np.mean(mean_time_optimization_at_each_slot_takes)))
# print('2 Average time for a policy agent to determine its action %.2f ms'%(1000 * np.mean(mean_time_calculating_strategy_takes2)))
# print('2 Average time for a policy mini-batch train %.2f ms'%(1000 * np.mean(mean_time_optimization_at_each_slot_takes2)))
print('Average FP iterations per run: %.2f'%(np.mean(mean_iterations_FP)))
#np_save_path = './fig/interference%s_network_%d'%(json_file,overal_sims)+'.dat'
#print(np_save_path)
#np.savez(np_save_path, t, interference_performance_policy2_db)
# sizeiteration = len(interference_performance_policy2_db)
# with open('./fig/interference%s_network_%d'%(json_file,overal_sims)+'.csv', mode='w') as employee_file:
# employee_writer = csv.writer(employee_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# for ele in range(sizeiteration):
# if str(interference_performance_policy2_db[ele]) != 'nan':
# employee_writer.writerow([t[ele], interference_performance_policy2_db[ele]])
extmatlab.savemat('./fig/se_slices_individual%s_network_%d'%(json_file,overal_sims)+'.mat', mdict={'se_slices': np.array(individualslices_performance_policy2)/avg_result_over })
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='give test scenarios.')
parser.add_argument('--json-file', type=str, default='train_K5_N20_M1_shadow10_episode4-5000_travelIND_fd10',
help='json file for the deployment')
parser.add_argument('--json-file-policy', type=str, default='ddpg200_100_50',
help='json file for the hyperparameters')
parser.add_argument('--json-file-CS', type=str, default='dqn100_50_50',
help='json file for the hyperparameters')
parser.add_argument('--json-file-policy2', type=str, default='dqn200_200_100',
help='json file for the hyperparameters')
parser.add_argument('--json-file-CS2', type=str, default='dqn200_200_100',
help='json file for the hyperparameters')
parser.add_argument('--num-sim', type=int, default=0,
help='If set to -1, it uses num_simulations of the json file. If set to positive, it runs one simulation with the given id.')
args = parser.parse_args()
test_scenario = {'json_file':args.json_file,
'json_file_policy':args.json_file_policy,
'json_file_CS':args.json_file_CS,
'json_file_policy2':args.json_file_policy2,
'json_file_CS2':args.json_file_CS2,
'num_sim':args.num_sim}
scenario = test_scenario
main(scenario)
| 50.371257 | 204 | 0.683785 |
e3f6c8a6322c1d518d5f6f8ced3a4d703b6b3d72 | 2,319 | py | Python | EthanBrown.SublimeText2.WebPackages/tools/PackageCache/SublimeLinter/sublimelinter/modules/perl.py | michaelray/Iristyle-ChocolateyPackages | 5051538253ff095af4b64d469137b23420f28be0 | [
"MIT"
]
| 18 | 2015-01-14T13:36:47.000Z | 2020-10-22T19:53:57.000Z | sublimelinter/modules/perl.py | davgit/SublimeLinter | f1f3a9f66bc3d5f8ebb1671ad36963e9121a5074 | [
"MIT"
]
| 12 | 2015-04-13T13:56:14.000Z | 2017-02-04T08:35:35.000Z | sublimelinter/modules/perl.py | davgit/SublimeLinter | f1f3a9f66bc3d5f8ebb1671ad36963e9121a5074 | [
"MIT"
]
| 30 | 2015-01-20T12:32:53.000Z | 2019-01-26T12:39:02.000Z | # -*- coding: utf-8 -*-
# perl.py - sublimelint package for checking perl files
import re
import subprocess
from base_linter import BaseLinter
CONFIG = {
'language': 'Perl'
}
class Linter(BaseLinter):
PERLCRITIC_RE = re.compile(r'\[(?P<pbp>.+)\] (?P<error>.+?) at line (?P<line>\d+), column (?P<column>\d+).+?')
PERL_RE = re.compile(r'(?P<error>.+?) at .+? line (?P<line>\d+)(, near "(?P<near>.+?)")?')
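    # Illustrative output lines the two patterns are meant to match (examples, not real runs):
    #   perlcritic --verbose 8: [ValuesAndExpressions::ProhibitMagicNumbers] Unnamed numeric literals at line 12, column 5.
    #   perl -c:                Global symbol "$x" requires explicit package name at foo.pl line 7, near "$x;"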
def __init__(self, config):
super(Linter, self).__init__(config)
self.linter = None
def get_executable(self, view):
self.linter = view.settings().get('perl_linter', 'perlcritic')
if self.linter == 'perl':
linter_name = 'Perl'
else:
linter_name = 'Perl::Critic'
try:
path = self.get_mapped_executable(view, self.linter)
subprocess.call([path, '--version'], startupinfo=self.get_startupinfo())
return (True, path, 'using {0}'.format(linter_name))
except OSError:
return (False, '', '{0} is required'.format(linter_name))
def get_lint_args(self, view, code, filename):
if self.linter == 'perl':
return ['-c']
else:
return ['--verbose', '8']
def parse_errors(self, view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages):
for line in errors.splitlines():
if self.linter == 'perl':
match = self.PERL_RE.match(line)
else:
match = self.PERLCRITIC_RE.match(line)
if match:
error, line = match.group('error'), match.group('line')
lineno = int(line)
if self.linter == 'perl':
near = match.group('near')
if near:
error = '{0}, near "{1}"'.format(error, near)
self.underline_regex(view, lineno, '(?P<underline>{0})'.format(re.escape(near)), lines, errorUnderlines)
else:
column = match.group('column')
column = int(column) - 1
self.underline_word(view, lineno, column, errorUnderlines)
self.add_message(lineno, lines, error, errorMessages)
| 35.136364 | 156 | 0.550668 |
f3096cdbe1da1c36fed741281d01ec91cd5cab99 | 3,834 | py | Python | networking_bagpipe/bagpipe_bgp/bgp_daemon.py | daespinel/networking-bagpipe-1 | 7e96cc651394813c1dc80747186b6cfcaa173f14 | [
"Apache-2.0"
]
| 29 | 2015-11-09T21:47:52.000Z | 2022-01-25T16:03:17.000Z | networking_bagpipe/bagpipe_bgp/bgp_daemon.py | openstack/networking-bagpipe-l2 | d472fb7b5d05b70f9f4e12288eee1a9a01fdc9fd | [
"Apache-2.0"
]
| null | null | null | networking_bagpipe/bagpipe_bgp/bgp_daemon.py | openstack/networking-bagpipe-l2 | d472fb7b5d05b70f9f4e12288eee1a9a01fdc9fd | [
"Apache-2.0"
]
| 9 | 2015-11-17T08:24:32.000Z | 2020-10-25T18:59:48.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging as python_logging
import signal
import sys
from oslo_config import cfg
from oslo_log import log as logging
import pbr.version
from neutron.common import config as n_config # noqa
from networking_bagpipe.bagpipe_bgp.api import api
from networking_bagpipe.bagpipe_bgp.api import config as api_config
from networking_bagpipe.bagpipe_bgp.common import config
from networking_bagpipe.bagpipe_bgp.engine import exabgp_peer_worker
from networking_bagpipe.bagpipe_bgp.vpn import dataplane_drivers as drivers
LOG = logging.getLogger(__name__)
def setup_config():
api_config.register_config()
config.register()
cfg.CONF(args=sys.argv[1:],
project='bagpipe-bgp',
default_config_files=['/etc/bagpipe-bgp/bgp.conf'],
version=('%%(prog)s %s' %
pbr.version.VersionInfo('networking-bagpipe')
.release_string()))
BAGPIPE_BGP_MODULE = "networking_bagpipe.bagpipe_bgp"
def setup_logging():
# even in debug mode we don't want to much talk from these
extra_log_level_defaults = [
'%s.engine.exabgp_peer_worker.exabgp=INFO' % BAGPIPE_BGP_MODULE,
'%s.common.looking_glass=WARNING' % BAGPIPE_BGP_MODULE,
'%s.engine.route_table_manager=INFO' % BAGPIPE_BGP_MODULE,
'ovsdbapp.backend.ovs_idl.vlog=INFO',
]
logging.set_defaults(default_log_levels=(logging.get_default_log_levels() +
extra_log_level_defaults))
logging.setup(cfg.CONF, "bagpipe-bgp")
def fix_log_file():
# assist transition from past bagpipe-bgp version which were
# using --log-file to specify the location of a file to configure logging
if (cfg.CONF.log_file and cfg.CONF.log_file.endswith('.conf')):
cfg.CONF.log_file = None
return ("now using oslo.log, specifying a log configuration file "
"should be done with --log-config-append")
def daemon_main():
logging.register_options(cfg.CONF)
setup_config()
log_file_warn = fix_log_file()
setup_logging()
if log_file_warn:
LOG.warning(log_file_warn)
exabgp_peer_worker.setup_exabgp_env()
try:
LOG.info("Starting bagpipe-bgp...")
pecan_api = api.PecanAPI()
cfg.CONF.log_opt_values(LOG, logging.INFO)
def stop(signum, _):
LOG.info("Received signal %d, stopping...", signum)
pecan_api.stop()
LOG.info("All threads now stopped...")
sys.exit(0)
signal.signal(signal.SIGTERM, stop)
signal.signal(signal.SIGINT, stop)
pecan_api.run()
except Exception:
LOG.exception("Error while starting BGP daemon")
def cleanup_main():
logging.register_options(cfg.CONF)
setup_config()
fix_log_file()
setup_logging()
python_logging.root.name = "[BgpDataplaneCleaner]"
for vpn_type, dataplane_driver in (
drivers.instantiate_dataplane_drivers().items()):
LOG.info("Cleaning dataplane for %s...", vpn_type)
dataplane_driver.reset_state()
LOG.info("BGP component dataplanes have been cleaned up.")
if __name__ == '__main__':
daemon_main()
| 29.267176 | 79 | 0.691445 |
203abe1bfbdd57f76bc43df415fa9c12d5619ca5 | 315 | py | Python | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
]
| null | null | null | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
]
| null | null | null | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
]
| null | null | null | settings = {"velocity_min": 1,
"velocity_max": 3,
"x_boundary": 800,
"y_boundary": 800,
"small_particle_radius": 5,
"big_particle_radius": 10,
"number_of_particles": 500,
"density_min": 2,
"density_max": 20
} | 31.5 | 39 | 0.47619 |
9ec3328f482770e5f1af0357233920a8760f88a4 | 923 | py | Python | kikit/defs.py | patrykf03/KiKit | dc85f67fb9e209c9b511ec84672c0809e203f8ea | [
"MIT"
]
| 784 | 2020-04-08T08:26:38.000Z | 2022-03-31T12:56:27.000Z | kikit/defs.py | patrykf03/KiKit | dc85f67fb9e209c9b511ec84672c0809e203f8ea | [
"MIT"
]
| 257 | 2020-04-07T20:29:05.000Z | 2022-03-29T21:15:50.000Z | kikit/defs.py | patrykf03/KiKit | dc85f67fb9e209c9b511ec84672c0809e203f8ea | [
"MIT"
]
| 110 | 2020-04-09T09:19:26.000Z | 2022-03-30T15:04:32.000Z | from enum import Enum, IntEnum
# These classes miss in the exported interface
class Layer(IntEnum):
F_Cu = 0
B_Cu = 31
B_Adhes = 32
F_Adhes = 33
B_Paste = 34
F_Paste = 35
B_SilkS = 36
F_SilkS = 37
B_Mask = 38
F_Mask = 39
Dwgs_User = 40
Cmts_User = 41
Eco1_User = 42
Eco2_User = 43
Edge_Cuts = 44
Margin = 45
B_CrtYd = 46
F_CrtYd = 47
B_Fab = 48
F_Fab = 49
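# Illustrative usage: IntEnum lets the numeric KiCad layer ids round-trip by name,
# e.g. Layer(44).name == "Edge_Cuts" and Layer.F_Cu == 0.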
class STROKE_T(IntEnum):
S_SEGMENT = 0
S_RECT = 1
S_ARC = 2
S_CIRCLE = 3
S_POLYGON = 4
S_CURVE = 5
class EDA_TEXT_HJUSTIFY_T(IntEnum):
GR_TEXT_HJUSTIFY_LEFT = -1
GR_TEXT_HJUSTIFY_CENTER = 0
GR_TEXT_HJUSTIFY_RIGHT = 1
class EDA_TEXT_VJUSTIFY_T(IntEnum):
GR_TEXT_VJUSTIFY_TOP = -1
GR_TEXT_VJUSTIFY_CENTER = 0
GR_TEXT_VJUSTIFY_BOTTOM = 1
class MODULE_ATTR_T(IntEnum):
MOD_DEFAULT = 0,
MOD_CMS = 1
MOD_VIRTUAL = 2
| 18.46 | 46 | 0.631636 |
44f12c7525f7650a6540436cd5f6f1f1f39b7347 | 169 | py | Python | della/inbox/forms.py | tdidechkin/della-django | a035a34fd3b9a5bd1aec872c0f48508b836d53b2 | [
"MIT"
]
| 50 | 2016-11-02T12:46:52.000Z | 2021-06-10T14:28:19.000Z | della/inbox/forms.py | tdidechkin/della-django | a035a34fd3b9a5bd1aec872c0f48508b836d53b2 | [
"MIT"
]
| 13 | 2016-11-08T16:53:18.000Z | 2017-05-10T17:59:57.000Z | della/inbox/forms.py | tdidechkin/della-django | a035a34fd3b9a5bd1aec872c0f48508b836d53b2 | [
"MIT"
]
| 21 | 2016-11-08T16:40:22.000Z | 2021-10-14T17:34:24.000Z | from django.forms import ModelForm
from .models import Message
class MessageCreateForm(ModelForm):
class Meta:
model = Message
fields = ['text']
| 15.363636 | 35 | 0.680473 |
17aee985a13459c2b9259841b25648e101f019ea | 6,301 | py | Python | tracext/git/future27.py | brokenbeatnik/trac-git-plugin | 722342ef03639415d7a1dc0230239d34cb97d988 | [
"BSD-3-Clause"
]
| 3 | 2015-06-03T02:34:35.000Z | 2016-07-31T21:54:07.000Z | tracext/git/future27.py | brokenbeatnik/trac-git-plugin | 722342ef03639415d7a1dc0230239d34cb97d988 | [
"BSD-3-Clause"
]
| 2 | 2015-02-19T09:02:19.000Z | 2019-05-22T09:14:13.000Z | tracext/git/future27.py | brokenbeatnik/trac-git-plugin | 722342ef03639415d7a1dc0230239d34cb97d988 | [
"BSD-3-Clause"
]
| 3 | 2015-02-19T05:25:41.000Z | 2017-09-26T07:47:09.000Z | # Copyright (C) 2001-2010 Python Software Foundation; All Rights Reserved
# This file contains backports for Python 2.5 based on Python 2.7's standard library
__all__ = ['namedtuple']
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
#################################################################
# collections.namedtuple
try:
# try to use the standard library's namedtuple...
from collections import namedtuple
except ImportError:
# use namedtuple backport
# the factory function
def namedtuple(typename, field_names, verbose=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
for name in (typename,) + field_names:
if not all(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_'):
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(cls, %(argtxt)s):
return tuple.__new__(cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(t):
'Return a new dict which maps field names to their values'
return {%(dicttxt)s} \n
def _replace(self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = self._make(map(kwds.pop, %(field_names)r, self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = property(itemgetter(%d))\n' % (name, i)
if verbose:
print template
# Execute the template string in a temporary namespace and
# support tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(itemgetter=_itemgetter, __name__='namedtuple_%s' % typename)
try:
exec template in namespace
except SyntaxError, e:
raise SyntaxError(e.message + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example).
if hasattr(_sys, '_getframe'):
result.__module__ = _sys._getframe(1).f_globals['__name__']
return result
############################################################################
# unit test
if __name__ == '__main__':
# verify that instances can be pickled
from cPickle import loads, dumps
Point = namedtuple('Point', 'x, y', True)
p = Point(x=10, y=20)
assert p == loads(dumps(p))
# test and demonstrate ability to override methods
class Point(namedtuple('Point', 'x y')):
__slots__ = ()
@property
def hypot(self):
return (self.x ** 2 + self.y ** 2) ** 0.5
def __str__(self):
return 'Point: x=%6.3f y=%6.3f hypot=%6.3f' % (self.x, self.y, self.hypot)
for p in Point(3, 4), Point(14, 5/7.):
print p
class Point(namedtuple('Point', 'x y')):
'Point class with optimized _make() and _replace() without error-checking'
__slots__ = ()
_make = classmethod(tuple.__new__)
def _replace(self, _map=map, **kwds):
return self._make(_map(kwds.get, ('x', 'y'), self))
print Point(11, 22)._replace(x=100)
Point3D = namedtuple('Point3D', Point._fields + ('z',))
print Point3D.__doc__
import doctest
TestResults = namedtuple('TestResults', 'failed attempted')
print TestResults(*doctest.testmod())
| 39.879747 | 126 | 0.595778 |
d7f86e4ab470623dddc5e15eff8071f847e02c77 | 119 | py | Python | bbs_answers/admin.py | TakeshiOkamoto/mpp_bbs_dj | 0a91c9bf3e1450dc6d062f0beeba263faa33b1e3 | [
"MIT"
]
| null | null | null | bbs_answers/admin.py | TakeshiOkamoto/mpp_bbs_dj | 0a91c9bf3e1450dc6d062f0beeba263faa33b1e3 | [
"MIT"
]
| null | null | null | bbs_answers/admin.py | TakeshiOkamoto/mpp_bbs_dj | 0a91c9bf3e1450dc6d062f0beeba263faa33b1e3 | [
"MIT"
]
| null | null | null | from django.contrib import admin
# Register your models here.
from .models import Answer
admin.site.register(Answer)
| 17 | 32 | 0.798319 |
4edb147f3aa73a878f6420dbf286c7f61bbc8d23 | 5,072 | py | Python | supplementary/simulations/nestablish_DNM_d0.05_s0.20/sim.py | mmosmond/rescueCoalescent | 568e3d214d86268bded740612f6fd22129fe34b4 | [
"MIT"
]
| 1 | 2019-10-21T13:06:45.000Z | 2019-10-21T13:06:45.000Z | supplementary/simulations/nestablish_DNM_d0.05_s0.20/sim.py | mmosmond/rescueCoalescent | 568e3d214d86268bded740612f6fd22129fe34b4 | [
"MIT"
]
| null | null | null | supplementary/simulations/nestablish_DNM_d0.05_s0.20/sim.py | mmosmond/rescueCoalescent | 568e3d214d86268bded740612f6fd22129fe34b4 | [
"MIT"
]
| null | null | null | import os #to run SLiM from python
N0 = 1e4 #intial population size and carrying capacity
d = 0.05 #decline rate
s = 0.2 #selection coefficient
h = 0.5 #dominance coefficient
B = 2 #number of offspring per parent
us = [10**(i/4) for i in range(-24,-11,2)] #mutation rates at selected site to explore
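# the comprehension above evaluates (under Python 3 true division) to roughly
# [1e-06, 3.2e-06, 1e-05, 3.2e-05, 1e-04, 3.2e-04, 1e-03]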
m = 0 #migration rate
datadir = "data" #location to put output files
k = 0 #initial number of beneficial alleles
nreps = 100 #number of replicates per k value (number of runs where allele fixes and population recovers)
maxt = 1000 #maximum number of generations (safestop that should never be reached)
#for each u value
for u in us:
# for each replicate
for i in range(nreps):
script = """
initialize() {
defineConstant("N0", %d); //initial pop size (integer)
defineConstant("d", %f); // wildtype decline rate [0,1]
defineConstant("s", %f); //beneficial selection coefficient ([0,1]; s>d for rescue to be possible)
defineConstant("h", %f); //beneficial dominance [0,1]
defineConstant("B", %d); //offspring per parent (positive integer; must be great than 1 for possible persistence)
defineConstant("u", %.8f); //mutation rate at beneficial locus [0,1]
defineConstant("m", %.8f); //migration rate [0,1]
defineConstant("k", %d); //initial number of mutants
defineConstant("outfile_nestablish", "%s/nestablish_u%.6f.txt"); //where to save dynamics
defineConstant("simID", getSeed()); //get the random seed to label temporary file
initializeSLiMModelType("nonWF"); //non Wright Fisher model
initializeMutationType("m1", h, "f", s); //beneficial mutation characteristics
m1.mutationStackPolicy = "f"; //keep first mutation
initializeGenomicElementType("g1", m1, 1.0); //define element g1 to have beneficial mutations
initializeGenomicElement(g1, 0, 0); //element g1 is just one site
initializeMutationRate(u, 0); //mutation rate per site
initializeRecombinationRate(0); //per basepair recombination rate
}
reproduction() { //occurs immediately before early events
for (i in 1:B) //B matings per parent
subpop.addCrossed(individual, subpop.sampleIndividuals(1)); //random mating, 1 offspring per pair
}
//discrete generations, hard carrying capacity, census and update fitness
1:%d early() {
//initialize population
if (sim.generation == 1) {
sim.addSubpop("p1", N0); //initialize population of wildtypes
target = sample(p1.genomes, k); //choose k chromosomes without replacement...
for (i in target)
i.addNewDrawnMutation(m1, 0); //... and give beneficial mutation
sim.outputFull("/tmp/slim_" + simID + ".txt"); //output this initial state to use for future runs if needed
}
//enforce discrete generations
inds = sim.subpopulations.individuals; //get info on all individuals
inds[inds.age > 0].fitnessScaling = 0.0; //parents all die (discrete generations)
//hard carrying capacity by random culling
off = inds[inds.age == 0]; //offspring
N = length(off); //total number of offspring
indices = which(inds.age == 0); //indices of offspring
if (N > N0) { //if more than N0...
inds[sample(indices, N-N0)].fitnessScaling = 0.0; //...kill a random subset to reduce N to N0
off = inds[inds.fitnessScaling > 0]; //get surviving offspring
}
// migration
if (m>0 & N>0) { //if adapting from migration and some offspring made
if (runif(1)<m) { //with probability m
target = sample(off.genomes, 1); //choose a chromosome to add a migrant allele to
target.addNewDrawnMutation(m1, 0); //add the migrant allele
}
}
// census offspring
N = length(off); //population size
freq = sum(asInteger(off.genomes.countOfMutationsOfType(m1)>0))/(2*N); //frequency of beneficial mutation
n = length(unique(off.genomes.mutations)); //number of unique copies remaining
if ((u==0 & m==0 & freq == 0) | (N==0)) { //if fail to adapt
writeFile(outfile_nestablish, "0", append=T); //record number that establish
catn("all hope was lost in generation " + sim.generation + " - RESTARTING"); //alert the user of restart
sim.readFromPopulationFile("/tmp/slim_" + simID + ".txt"); //restart simulation
}
else {
if (freq > 0.9 & N == N0) { //if mutation (essentially) fixed and population recovered
writeFile(outfile_nestablish, asString(n), append=T); //record number that establish
catn("rescue complete in generation " + sim.generation); //alert the uer of rescue
sim.simulationFinished(); //end simulation
}
}
//fitness scaling (viability selection occurs after early events)
p1.fitnessScaling = (1.0 - d)/B; //survival probability V = X(1-d)/B, where X is the fitness effect of the selected site (X=1 for wildtype, X=1+s*h for heterozygotes, X=1+s for mutant homozygotes)
}
//backup: end simulation if runs too long
%d late () {
catn("times up, make maxt bigger");
sim.simulationFinished();
}
""" %(N0,d,s,h,B,u,m,k,datadir,u,maxt,maxt+1)
os.system("echo '" + script + "' | slim") #run script in SLiM
| 45.693694 | 199 | 0.683162 |
c9e08dec9fa715ce53a70f011ffa6a11343e7950 | 6,270 | py | Python | zeus/pubsub/server.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
]
| 221 | 2017-07-03T17:29:21.000Z | 2021-12-07T19:56:59.000Z | zeus/pubsub/server.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
]
| 298 | 2017-07-04T18:08:14.000Z | 2022-03-03T22:24:51.000Z | zeus/pubsub/server.py | conrad-kronos/zeus | ddb6bc313e51fb22222b30822b82d76f37dbbd35 | [
"Apache-2.0"
]
| 24 | 2017-07-15T13:46:45.000Z | 2020-08-16T16:14:45.000Z | import aioredis
import asyncio
import io
import json
from aiohttp.web import Application, Response, StreamResponse
from collections import namedtuple
from functools import wraps
from flask import current_app
from urllib.parse import urlparse
from uuid import uuid4
import sentry_sdk
from zeus import auth
from zeus.utils.sentry import span
Event = namedtuple("Event", ["id", "event", "data"])
def is_valid_origin(request):
allowed_origins = current_app.config.get("ALLOWED_ORIGINS")
if allowed_origins is None:
return request.url.host == current_app.config["DOMAIN"]
return request.url.host in allowed_origins
def log_errors(func):
@wraps(func)
def wrapper(*args, **kwargs):
async def tmp():
try:
return await func(*args, **kwargs)
except asyncio.CancelledError:
raise
except Exception as e:
current_app.logger.exception(str(e))
raise
return tmp()
return wrapper
@span("worker")
@log_errors
async def worker(channel, queue, tenant, repo_ids=None, build_ids=None):
allowed_repo_ids = frozenset(tenant.access.keys())
while await channel.wait_message():
msg = await channel.get_json()
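        # Assumed message shape (illustrative), based on the fields accessed below:
        #   {"id": "...", "event": "build.update", "data": {"id": ..., "repository": {"id": ...}, ...}}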
data = msg.get("data")
if data["repository"]["id"] not in allowed_repo_ids:
continue
if build_ids and data["id"] not in build_ids:
continue
if repo_ids and data["repository"]["id"] not in repo_ids:
continue
evt = Event(msg.get("id"), msg.get("event"), data)
with sentry_sdk.Hub.current.start_span(
op="pubsub.receive", description=msg.get("id")
):
await queue.put(evt)
current_app.logger.debug("pubsub.event.received qsize=%s", queue.qsize())
@span("ping")
@log_errors
async def ping(loop, resp, client_guid):
# periodically send ping to the browser. Any message that
# starts with ":" colon ignored by a browser and could be used
# as ping message.
while True:
await asyncio.sleep(15, loop=loop)
current_app.logger.debug("pubsub.ping guid=%s", client_guid)
with sentry_sdk.Hub.current.start_span(op="pubsub.ping"):
resp.write(b": ping\r\n\r\n")
@span("stream")
@log_errors
async def stream(request):
client_guid = str(uuid4())
with sentry_sdk.configure_scope() as scope:
scope.set_tag("client_guid", client_guid)
if request.headers.get("accept") != "text/event-stream":
return Response(status=406)
if request.method != "GET":
return Response(status=405)
token = request.query.get("token")
if not token:
return Response(status=401)
tenant = auth.get_tenant_from_signed_token(token)
if not tenant:
return Response(status=401)
build_ids = frozenset(request.query.get("build") or [])
# TODO(dcramer): we could validate this here
repo_ids = frozenset(request.query.get("repo") or [])
if getattr(tenant, "user_id", None):
with sentry_sdk.configure_scope() as scope:
scope.user = {"id": tenant.user_id}
current_app.logger.debug(
"pubsub.client.connected guid=%s tenant=%s", client_guid, token
)
loop = request.app.loop
parts = urlparse(current_app.config["REDIS_URL"])
with sentry_sdk.Hub.current.start_span(
op="aioredis.create_redis",
description=f'{parts.hostname or "localhost"}:{parts.port or "6379"}',
):
conn = await aioredis.create_redis(
address=(parts.hostname or "localhost", parts.port or "6379"),
            db=int(parts.path.lstrip("/") or 0),  # database index from the URL path, e.g. "/2" -> 2
password=parts.password,
loop=loop,
)
try:
queue = asyncio.Queue(loop=loop)
with sentry_sdk.Hub.current.start_span(
op="aioredis.subscribe", description="builds"
):
res = await conn.subscribe("builds")
asyncio.ensure_future(worker(res[0], queue, tenant, repo_ids, build_ids))
resp = StreamResponse(status=200, reason="OK")
resp.headers["Content-Type"] = "text/event-stream"
resp.headers["Cache-Control"] = "no-cache"
resp.headers["Connection"] = "keep-alive"
if "Origin" in request.headers and is_valid_origin(request):
resp.headers["Access-Control-Allow-Origin"] = request.headers.get("Origin")
resp.headers["Access-Control-Expose-Headers"] = "*"
resp.enable_chunked_encoding()
await resp.prepare(request)
# loop.create_task(ping(loop, resp, client_guid))
# resp.write(b'retry: 100\r\n\r\n')
while True:
event = await queue.get()
if event is None:
break
with sentry_sdk.Hub.current.start_span(
op="pubsub.send", description=event.id
):
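                # Serialize to the text/event-stream wire format, e.g. (illustrative):
                #   id: 1234\r\n
                #   event: build.update\r\n
                #   data: {"id": "...", ...}\r\n
                #   \r\n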
buffer = io.BytesIO()
if event.id:
buffer.write(b"id: %s\r\n" % (event.id.encode("utf-8"),))
if event.event:
buffer.write(b"event: %s\r\n" % (event.event.encode("utf-8")))
if event.data:
buffer.write(
b"data: %s\r\n" % (json.dumps(event.data).encode("utf-8"))
)
buffer.write(b"\r\n")
resp.write(buffer.getvalue())
queue.task_done()
current_app.logger.debug("pubsub.event.sent qsize=%s", queue.qsize())
# Yield to the scheduler so other processes do stuff.
await resp.drain()
await resp.write_eof()
return resp
finally:
conn.close()
await conn.wait_closed()
current_app.logger.debug(
"client.disconnected guid=%s", client_guid, exc_info=True
)
@span("health_check")
async def health_check(request):
return Response(text='{"ok": true}')
async def build_server(loop, host, port):
app = Application(loop=loop, logger=current_app.logger, debug=current_app.debug)
app.router.add_route("GET", "/", stream)
app.router.add_route("GET", "/healthz", health_check)
return await loop.create_server(app.make_handler(), host, port)
| 30.8867 | 87 | 0.611962 |
a02ff8de5b12c28d27fbb28af5e5414a95cc37c8 | 530 | py | Python | pysbolgraph/S2Experiment.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
]
| 4 | 2018-06-29T10:43:08.000Z | 2019-03-27T22:33:33.000Z | pysbolgraph/S2Experiment.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
]
| 14 | 2019-01-22T16:03:12.000Z | 2019-11-11T19:05:32.000Z | pysbolgraph/S2Experiment.py | zhfanrui/pysbolgraph | c4914705bd9b22a2b69db0fc4d43049fcb07ad17 | [
"BSD-2-Clause"
]
| 12 | 2018-07-01T10:59:37.000Z | 2021-03-01T08:48:20.000Z |
from .S2Identified import S2Identified
from .terms import Prov
from .terms import SBOL2
from rdflib import URIRef
from rdflib.namespace import RDF
class S2Experiment(S2Identified):
def __init__(self, g, uri):
super(S2Experiment, self).__init__(g, uri)
@property
def experimental_data(self):
return [self.g.uri_to_facade(uri) for uri in self.get_uri_properties(SBOL2.experimentalData)]
def add_experimental_data(self, ed):
self.insert_identified_property(SBOL2.experimentalData, ed)
| 24.090909 | 101 | 0.749057 |
1453a5af872ae03a445f7ab9351ff5e14d812e58 | 1,090 | py | Python | setup.py | ivanrvpereira/mllp-http | a2c527ece4159c6b3e9ab19b13475be86df30921 | [
"MIT"
]
| 6 | 2020-11-12T19:24:52.000Z | 2022-02-04T15:32:02.000Z | setup.py | ivanrvpereira/mllp-http | a2c527ece4159c6b3e9ab19b13475be86df30921 | [
"MIT"
]
| 8 | 2021-02-15T15:00:42.000Z | 2022-02-19T00:36:50.000Z | setup.py | ivanrvpereira/mllp-http | a2c527ece4159c6b3e9ab19b13475be86df30921 | [
"MIT"
]
| 3 | 2021-03-09T16:45:24.000Z | 2021-08-25T08:47:33.000Z | #!/usr/bin/env python3
import os
import setuptools
version = {}
with open("mllp_http/version.py", "r") as f:
exec(f.read(), version)
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
author="Rivet Health",
author_email="[email protected]",
classifiers=[
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
description="Translate between MLLP and HTTP",
entry_points={
"console_scripts": [
"mllp2http=mllp_http.main:mllp2http",
"http2mllp=mllp_http.main:http2mllp",
]
},
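    # the console_scripts above install the `mllp2http` and `http2mllp` commands;
    # their command-line arguments are defined in mllp_http.main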
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=["requests"],
name="mllp-http",
packages=setuptools.find_packages(),
project_urls={
"Issues": "https://github.com/rivethealth/mllp-http/issues",
},
url="https://github.com/rivethealth/mllp-http",
version=version["__version__"],
)
| 27.948718 | 68 | 0.640367 |
78553ae2885e4caac2be1618c6082936929065c2 | 2,803 | py | Python | python/GafferUI/Spacer.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
]
| 561 | 2016-10-18T04:30:48.000Z | 2022-03-30T06:52:04.000Z | python/GafferUI/Spacer.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
]
| 1,828 | 2016-10-14T19:01:46.000Z | 2022-03-30T16:07:19.000Z | python/GafferUI/Spacer.py | ddesmond/gaffer | 4f25df88103b7893df75865ea919fb035f92bac0 | [
"BSD-3-Clause"
]
| 120 | 2016-10-18T15:19:13.000Z | 2021-12-20T16:28:23.000Z | ##########################################################################
#
# Copyright (c) 2011, John Haddon. All rights reserved.
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferUI
from Qt import QtWidgets
## \todo Size accessors
class Spacer( GafferUI.Widget ) :
## Spacers range their size between `size` and `maximumSize`,
# optionally requesting that if possible they would prefer to be
# at a specific size in between.
## \todo Rename size to minimumSize. We're just keeping the name
# for backwards compatibility for now.
def __init__( self, size, maximumSize=None, preferredSize=None, **kw ) :
GafferUI.Widget.__init__( self, QtWidgets.QWidget(), **kw )
if preferredSize is not None :
layout = QtWidgets.QHBoxLayout()
layout.setContentsMargins( 0, 0, 0, 0 )
self._qtWidget().setLayout( layout )
layout.addSpacerItem(
QtWidgets.QSpacerItem( preferredSize.x, preferredSize.y, QtWidgets.QSizePolicy.Preferred )
)
self._qtWidget().setMinimumSize( size.x, size.y )
if maximumSize is not None :
self._qtWidget().setMaximumSize( maximumSize.x, maximumSize.y )
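# Illustrative usage (assumes the usual Gaffer convention of imath.V2i for 2d sizes):
#   spacer = GafferUI.Spacer( imath.V2i( 0 ), preferredSize = imath.V2i( 10, 1 ) )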
| 42.469697 | 94 | 0.699964 |
7ae1e20bc239e92f069139819ad1cb808bf74b39 | 17,381 | py | Python | tests/strings_test.py | adbmd/vaex | 48531b5d0ff3b8010809dc422f7e67555f0ad79b | [
"MIT"
]
| 1 | 2020-08-31T17:53:01.000Z | 2020-08-31T17:53:01.000Z | tests/strings_test.py | adbmd/vaex | 48531b5d0ff3b8010809dc422f7e67555f0ad79b | [
"MIT"
]
| null | null | null | tests/strings_test.py | adbmd/vaex | 48531b5d0ff3b8010809dc422f7e67555f0ad79b | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
import sys
import re
import vaex
import numpy as np
import pyarrow as pa
import pytest
@pytest.mark.skipif(vaex.utils.osname == 'windows',
reason="windows' snprintf seems buggy")
def test_format():
num1 = np.array([1, 2, 3], dtype=np.int32)
num2 = np.array([1.1, 2.2, 3.3], dtype=np.float32)
text = ['Here', 'we', 'go']
df = vaex.from_arrays(num1=num1, num2=num2, text=text)
assert df.num1.format("%d").tolist() == ['1', '2', '3']
assert df.num1.format("%04d").tolist() == ['0001', '0002', '0003']
assert df.num2.format('%f').tolist() == ['1.100000', '2.200000', '3.300000']
assert df.num2.format('%05.2f').tolist() == ['01.10', '02.20', '03.30']
assert df.text.format('pre-%s-post').tolist() == ['pre-%s-post' % k for k in text]
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.4 or higher")
def test_dtype_object_string(tmpdir):
x = np.arange(8, 12)
s = np.array(list(map(str, x)), dtype='O')
df = vaex.from_arrays(x=x, s=s)
assert df.columns['s'].dtype.kind == 'O'
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read = vaex.open(path, as_numpy=False)
# the data type of x is different (arrow vs numpy)
assert df_read.compare(df) == ([], [], ['x'], [])
def test_export_arrow_strings_to_hdf5(tmpdir):
df = vaex.from_arrays(names=np.array(['hi', 'is', 'l2', np.nan], dtype='O'))
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read_arrow = vaex.open(path, as_numpy=False)
path = str(tmpdir.join('test.hdf5'))
df.export(path)
df_read_hdf5 = vaex.open(path)
assert df_read_hdf5.compare(df_read_arrow) == ([], [], [], [])
def test_arrow_strings_concat(tmpdir):
df = vaex.from_arrays(names=['hi', 'is', 'l2'])
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read_arrow = vaex.open(path, as_numpy=False)
path = str(tmpdir.join('test.hdf5'))
df_read_arrow.export(path)
df_read_hdf5 = vaex.open(path)
assert df_read_hdf5.compare(df_read_arrow) == ([], [], [], [])
def test_concat():
ds1 = vaex.from_arrays(names=['hi', 'is', 'l2'])
ds2 = vaex.from_arrays(names=['hello', 'this', 'is', 'long'])
ds = ds1.concat(ds2)
assert len(ds) == len(ds1) + len(ds2)
assert ds.data_type('names') == pa.string()
assert ds.data_type('names') != np.object
def test_string_count_stat():
ds = vaex.from_arrays(names=['hello', 'this', 'is', 'long'])
assert ds.count(ds.names) == 4
ds = vaex.from_arrays(names=np.ma.array(['hello', 'this', 'is', 'long'], mask=[0, 0, 1, 0]))
assert ds.count(ds.names) == 3
df = vaex.from_arrays(names=np.array(['hi', 'is', 'l2', np.nan], dtype='O'))
assert df.count(ds.names) == 3
names = vaex.string_column(['hello', 'this', None, 'long'])
x = np.arange(len(names))
df = vaex.from_arrays(names=names, x=x)
assert df.count(df.names, binby='x', limits=[0, 100], shape=1).tolist() == [3]
@pytest.mark.skip
def test_string_dtype_with_none():
ds = vaex.from_arrays(names=['hello', 'this', 'is', None])
assert ds.count(ds.names) == 3
def test_unicode():
ds = vaex.from_arrays(names=['bla\u1234'])
assert ds.names.dtype == pa.string()
ds = vaex.from_arrays(names=['bla'])
assert ds.names.dtype == pa.string()
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.4 or higher")
def test_concat_mixed():
# this can happen when you want to concat multiple csv files
# and pandas makes one have nans, since they all have missing values
# and the other string
ds1 = vaex.from_arrays(names=['not', 'missing'])
ds2 = vaex.from_arrays(names=[np.nan, np.nan])
assert ds1.data_type(ds1.names) == pa.string()
assert ds2.data_type(ds2.names) == np.float64
ds = ds1.concat(ds2)
assert len(ds) == len(ds1) + len(ds2)
assert ds.data_type(ds.names) == ds1.names.dtype
def test_strip():
ds = vaex.from_arrays(names=['this ', ' has', ' space'])
ds['stripped'] = ds.names.str.strip()
ds.stripped.tolist() == ['this', 'has', 'space']
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.4 or higher")
def test_unicode2(tmpdir):
path = str(tmpdir.join('utf32.hdf5'))
ds = vaex.from_arrays(names=["vaex", "or", "væx!"])
assert ds.names.dtype == pa.string()
ds.export_hdf5(path)
ds = vaex.open(path)
assert ds.names.dtype == pa.string()
assert ds.names.tolist() == ["vaex", "or", "væx!"]
@pytest.fixture(params=['dfs_arrow', 'dfs_array'])
def dfs(request, dfs_arrow, dfs_array):
named = dict(dfs_arrow=dfs_arrow, dfs_array=dfs_array)
return named[request.param]
string_list = ["vaex", " \tor", "VæX! ", "vaex or VÆX!", "Æ and", "æ are weird", "12", "æ", "a1", "a1æ", "\t "]
unicode_compat = lambda x: x
try:
unicode
unicode_compat = lambda x: x.decode('utf8')
string_list = map(unicode_compat, string_list)
except NameError:
pass
string_list_reverse = string_list[::-1]
@pytest.fixture(scope='session')
def dfs_arrow(tmp_path_factory):
tmpdir = tmp_path_factory.mktemp("vaex")
path = str(tmpdir / 'strings.hdf5')
df = vaex.from_arrays(s=vaex.string_column(string_list), sr=vaex.string_column(string_list_reverse))
df.export(path) # we write it out so that the memory is read only
return vaex.open(path)
return df
def test_null_values():
df = vaex.from_arrays(s=vaex.string_column(['aap', None, 'mies']), x=[0, 1, 2])
assert df.count() == 3
assert df.count(df.s) == 2
assert df.count(df.s, selection=df.x > 0) == 1
@pytest.fixture()
def dfs_array():
return vaex.from_arrays(s=np.array(string_list, dtype='O'), sr=np.array(string_list_reverse, dtype='O'))
def test_byte_length(dfs):
assert dfs.s.str.byte_length().tolist() == [len(k.encode('utf8')) for k in string_list]
def test_string_capitalize(dfs):
assert dfs.s.str.capitalize().tolist() == dfs.s.str_pandas.capitalize().tolist()
def test_string_cat(dfs):
c = [s1+s2 for s1, s2 in zip(string_list, string_list_reverse)]
assert dfs.s.str.cat(dfs.sr).tolist() == c
@pytest.mark.xfail(reason='pandas does not like getting an arrow array as argument')
def test_string_cat_pandas(dfs):
c = [s1+s2 for s1, s2 in zip(string_list, string_list_reverse)]
assert dfs.s.str_pandas.cat(dfs.sr).tolist() == c
def test_string_contains(dfs):
assert dfs.s.str.contains('v', regex=False).tolist() == [True, False, False, True, False, False, False, False, False, False, False]
assert dfs.s.str.contains('æ', regex=False).tolist() == [False, False, True, False, False, True, False, True, False, True, False]
assert dfs.s.str.contains('Æ', regex=False).tolist() == [False, False, False, True, True, False, False, False, False, False, False]
@pytest.mark.parametrize("width", [2, 10])
def test_string_center(dfs, width):
assert dfs.s.str.center(width).tolist() == dfs.s.str_pandas.center(width).tolist()
def test_string_counts(dfs):
assert dfs.s.str.count("v", regex=False).tolist() == dfs.s.str_pandas.count("v").tolist()
assert dfs.s.str.count("[va]", regex=True).tolist() == dfs.s.str_pandas.count("[va]").tolist()
def test_string_endswith(dfs):
assert dfs.s.str.endswith("x").tolist() == dfs.s.str_pandas.endswith("x").tolist()
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_find(dfs, sub, start, end):
assert dfs.s.str.find(sub, start, end).tolist() == dfs.s.str_pandas.find(sub, start, end).tolist()
@pytest.mark.parametrize("i", [-1, 3, 5, 10])
def test_string_get(dfs, i):
x = dfs.s.str_pandas.get(i).values.tolist()
assert dfs.s.str.get(i).tolist() == [k[i] if i < len(k) else '' for k in string_list]
@pytest.mark.parametrize("sub", ["v", "æ"])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_index(dfs, sub, start, end):
assert dfs.s.str.find(sub, start, end).tolist() == dfs.s.str.index(sub, start, end).tolist()
@pytest.mark.parametrize("pattern", [None, ' '])
def test_string_join(dfs, pattern):
assert dfs.s.str.split(pattern).str.join('-').tolist() == dfs.s.str.split(pattern).str.join('-').tolist()
def test_string_len(dfs):
assert dfs.s.str.len().astype('i4').tolist() == [len(k) for k in string_list]
assert dfs.s.str_pandas.len().astype('i4').tolist() == [len(k) for k in string_list]
@pytest.mark.parametrize("width", [2, 10])
def test_string_ljust(dfs, width):
assert dfs.s.str.ljust(width).tolist() == dfs.s.str_pandas.ljust(width).tolist()
def test_string_lower(dfs):
assert dfs.s.str.lower().tolist() == dfs.s.str_pandas.lower().tolist()
def test_string_lstrip(dfs):
assert dfs.s.str.lstrip().tolist() == dfs.s.str_pandas.lstrip().tolist()
assert dfs.s.str.lstrip('vV ').tolist() == dfs.s.str_pandas.lstrip('vV ').tolist()
def test_string_match(dfs):
assert dfs.s.str.match('^v.*').tolist() == dfs.s.str_pandas.match('^v.*').tolist()
assert dfs.s.str.match('^v.*').tolist() == [k.startswith('v') for k in string_list]
# TODO: normalize
@pytest.mark.parametrize("width", [2, 10])
@pytest.mark.parametrize("side", ['left', 'right', 'both'])
def test_string_pad(dfs, width, side):
assert dfs.s.str.pad(width, side=side).tolist() == dfs.s.str_pandas.pad(width, side=side).tolist()
# TODO: partition
@pytest.mark.parametrize("repeats", [1, 3])
def test_string_repeat(dfs, repeats):
assert dfs.s.str.repeat(repeats).tolist() == dfs.s.str_pandas.repeat(repeats).tolist()
@pytest.mark.parametrize("pattern", ["v", " ", unicode_compat("VæX")])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("n", [-1, 1])
def test_string_replace(dfs, pattern, replacement, n):
assert dfs.s.str.replace(pattern, replacement, n).tolist() == dfs.s.str_pandas.replace(pattern, replacement, n).tolist()
@pytest.mark.parametrize("pattern", ["v", " "])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("flags", [0, int(re.IGNORECASE)])
def test_string_replace_regex(dfs, pattern, replacement, flags):
assert dfs.s.str.replace(pattern, replacement, flags=flags, regex=True).tolist() == \
dfs.s.str_pandas.replace(pattern, replacement, flags=flags, regex=True).tolist()
@pytest.mark.xfail(reason='unicode not supported fully in regex')
@pytest.mark.parametrize("pattern", [unicode_compat("VæX")])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("flags", [0, int(re.IGNORECASE)])
def test_string_replace_regex_unicode(dfs, pattern, replacement, flags):
assert dfs.s.str.replace(pattern, replacement, flags=flags, regex=True).tolist() == \
dfs.s.str_pandas.replace(pattern, replacement, flags=flags, regex=True).tolist()
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_rfind(dfs, sub, start, end):
assert dfs.s.str.rfind(sub, start, end).tolist() == dfs.s.str_pandas.rfind(sub, start, end).tolist()
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_rindex(dfs, sub, start, end):
assert dfs.s.str.rindex(sub, start, end).tolist() == dfs.s.str_pandas.rfind(sub, start, end).tolist()
@pytest.mark.parametrize("width", [2, 10])
def test_string_rjust(dfs, width):
assert dfs.s.str.rjust(width).tolist() == dfs.s.str_pandas.rjust(width).tolist()
def test_string_rstrip(dfs):
assert dfs.s.str.rstrip().tolist() == dfs.s.str_pandas.rstrip().tolist()
assert dfs.s.str.rstrip('x! ').tolist() == dfs.s.str_pandas.rstrip('x! ').tolist()
# @pytest.mark.parametrize("start", [0, 3, 5])
# @pytest.mark.parametrize("end", [-1, 3, 5, 10])
@pytest.mark.parametrize("start", [0, -1, -5, 10])
@pytest.mark.parametrize("end", [None, -1, 3, 1000])
def test_string_slice(dfs, start, end):
assert dfs.s.str.slice(start, end).tolist() == dfs.s.str_pandas.slice(start, end).tolist()
def test_string_startswith(dfs):
assert dfs.s.str.startswith("x").tolist() == dfs.s.str_pandas.startswith("x").tolist()
def test_string_strip(dfs):
    assert dfs.s.str.strip().tolist() == dfs.s.str_pandas.strip().tolist()
    assert dfs.s.str.strip('vx! ').tolist() == dfs.s.str_pandas.strip('vx! ').tolist()
def test_string_title(dfs):
assert dfs.s.str.title().tolist() == dfs.s.str_pandas.title().tolist()
def test_string_lower(dfs):
assert dfs.s.str.lower().tolist() == dfs.s.str_pandas.lower().tolist()
def test_string_upper(dfs):
assert dfs.s.str.upper().tolist() == dfs.s.str_pandas.upper().tolist()
def test_string_isalnum(dfs):
assert dfs.s.str.isalnum().tolist() == dfs.s.str_pandas.isalnum().tolist()
def test_string_isalpha(dfs):
assert dfs.s.str.isalpha().tolist() == dfs.s.str_pandas.isalpha().tolist()
def test_string_isdigit(dfs):
assert dfs.s.str.isdigit().tolist() == dfs.s.str_pandas.isdigit().tolist()
def test_string_isspace(dfs):
assert dfs.s.str.isspace().tolist() == dfs.s.str_pandas.isspace().tolist()
def test_string_islower(dfs):
assert dfs.s.str.islower().tolist() == dfs.s.str_pandas.islower().tolist()
assert dfs.s.str.lower().str.islower().tolist() == dfs.s.str_pandas.lower().str_pandas.islower().tolist()
@pytest.mark.parametrize("ascii", [True, False])
def test_string_istitle(ascii):
df = vaex.from_arrays(s=['Title Case', 'no title'])
assert df.s.str.istitle(ascii=ascii).tolist() == [True, False]
def test_string_isupper(dfs):
assert dfs.s.str.isupper().tolist() == dfs.s.str_pandas.isupper().tolist()
assert dfs.s.str.upper().str.isupper().tolist() == dfs.s.str_pandas.upper().str_pandas.isupper().tolist()
def test_string_isspace(dfs):
assert dfs.s.str.isspace().tolist() == dfs.s.str_pandas.isspace().tolist()
@pytest.mark.parametrize("width", [2, 10])
def test_string_zfill(dfs, width):
assert dfs.s.str.zfill(width).tolist() == dfs.s.str_pandas.zfill(width).tolist()
def test_to_string():
x = np.arange(1, 4, dtype='f4')
df = vaex.from_arrays(x=x)
df['s'] = df.x.to_string()
assert df.s.tolist() == ["%f" % k for k in x]
def test_string_strip_special_case():
strings = ["Explanation\nWhy the edits made under my username Hardcore Metallica Fan were reverted? "
"They weren't vandalisms, just closure on some GAs after I voted at New York Dolls FAC. "
"And please don't remove the template from the talk page since I'm retired now.89.205.38.27"]
df = vaex.from_arrays(s=vaex.string_column(strings))
df.s.str.strip(' ').values # .get(0)
def test_string_strip_special_case2():
strings = ['The eunuch in question left me no choice but to reinsert it. Take action as you see fit.·snunɐw·']
df = vaex.from_arrays(s=vaex.string_column(strings))
assert df.s.str.upper().tolist() == df.s.str_pandas.upper().tolist()
@pytest.mark.xfail(reason='we need to fix this, similar to upper and lower')
def test_string_strip_special_case3():
strings = ['ɐa', 'aap']
df = vaex.from_arrays(s=vaex.string_column(strings))
assert df.s.str.capitalize().tolist() == df.s.str_pandas.capitalize().tolist()
def test_string_slice_repr():
s = ['Here', 'is', 'a', 'simple', 'unit-test']
df = vaex.from_arrays(s=s)
df['sliced_s'] = df.s.str.slice(start=2, stop=5)
repr(df['sliced_s'])
@pytest.mark.skipif(sys.version_info[0] == 2, reason="no support for python2")
@pytest.mark.parametrize("match", ["vaex", "VæX! "])
def test_strings_operator_equals(dfs, match):
assert (dfs.s == match).tolist() == [k == match for k in string_list]
assert (match == dfs.s).tolist() == [k == match for k in string_list]
assert (dfs.s == dfs.s).tolist() == [k == k for k in string_list]
@pytest.mark.skipif(sys.version_info[0] == 2, reason="no support for python2")
@pytest.mark.parametrize("extra", ["vaex", "VæX! "])
def test_strings_operator_plus(dfs, extra):
assert (dfs.s + extra).tolist() == [k + extra for k in string_list]
assert (extra + dfs.s).tolist() == [extra + k for k in string_list]
assert (dfs.s + dfs.s).tolist() == [k + k for k in string_list]
assert (dfs.s + extra + dfs.s).tolist() == [k + extra + k for k in string_list]
def test_masked_string():
s = np.ma.MaskedArray(data=['dog', 'dog', 'cat', 'cat', 'mouse'], mask=[False, False, True, False, True])
df = vaex.from_arrays(s=s)
assert (df.s == 'cat').tolist() == [False, False, False, True, False]
def test_string_operations_from_mmap_file(tmpdir):
# if we write the file to disk and mmap it read only, we trigger invalid memory writes
# see https://github.com/vaexio/vaex/pull/459
x = np.arange(5)
y = np.array(['This', 'is', 'a', None, 'test'])
df = vaex.from_arrays(x=x, y=y)
filename = str(tmpdir / 'test.hdf5')
df.export_hdf5(filename)
df_from_file = vaex.open(filename)
assert df_from_file.y.str.slice(start=0, stop=2).tolist() == ['Th', 'is', 'a', None, 'te']
assert df_from_file.y.str.upper().tolist() == ['THIS', 'IS', 'A', None, 'TEST']
| 38.032823 | 135 | 0.661412 |
6c7776c63343f6c0eae35947bdd3fdb171dd6687 | 4,080 | py | Python | extdirect/django/filter.py | pavelgood/extdirect.django | 58c460e26338521002775b4b038e35a96464fecd | [
"BSD-3-Clause"
]
| 1 | 2015-04-02T13:13:31.000Z | 2015-04-02T13:13:31.000Z | extdirect/django/filter.py | pavelgood/extdirect.django | 58c460e26338521002775b4b038e35a96464fecd | [
"BSD-3-Clause"
]
| null | null | null | extdirect/django/filter.py | pavelgood/extdirect.django | 58c460e26338521002775b4b038e35a96464fecd | [
"BSD-3-Clause"
]
| null | null | null | import operator
import string
import json
import sys
from functools import reduce
from django.db.models import Q
from django.db.models.fields import FieldDoesNotExist
class QueryParser:
"""
Mongodb like query parser.
Parses query from dict and returns Q-objects.
$lt, $gt, $lte, $gte syntax: { field: {$lt: value} }
$and, $or syntax: { $and: [ { <expression1> }, { <expression2> } , ... , { <expressionN> } ] }
$not syntax: { field: { $not: { <operator-expression> } } }
foreign key fields syntax: fkfield1__fkfield2__......__fkfieldN__filterfield
JSON structure:
filter: { property: "queryfilter", value: { <expression> } }
Value example:
value: { $or: [ { $or:[ {}, {} ...] }, { $not: {} }, {} ] }
"""
#comparision operators
_gt = '$gt'
_lt = '$lt'
_gte = '$gte'
_lte = '$lte'
_in = '$in'
_contains = '$contains'
_icontains = '$icontains'
_exact = '$exact'
_iexact = '$iexact'
_range = '$range'
#logical operators
_or = '$or'
_and = '$and'
_not = '$not'
#list of comparision operators
comparision = [_gt, _gte, _lt, _lte, _in, _contains, _icontains, _exact, _iexact]
#list of logical operators
logical = [_or, _and, _not]
#django's model
model = None
def __init__(self, model):
self.model = model
def parse(self, data, optional=None):
"""
Deserializes json string to python object and parses it.
Returns Q-object.
"""
result = Q()
try:
data = json.loads(data)
result = self._parse_item(data, optional)
except ValueError as e:
print(e)
return result
def _parse_item(self, data, optional):
"""
Parses filter item: { element: expression }
Returns Q-object.
"""
if not isinstance(data, dict):
return Q()
if not len(data):
return Q()
        key = list(data.keys())[0]
if key[0] == '$':
if key in self.logical:
return self._parse_logical(key, data.pop(key), optional)
else:
raise ValueError("Unsupported logical operation %s" % key)
else:
return self._parse_field(key, data[key], optional)
def _parse_logical(self, key, data, optional):
"""
Parses block with logical operation.
Returns Q-object.
"""
if key == self._and:
if not isinstance(data, list):
print("Not a list")
return Q()
qf = list()
for item in data:
obj = self._parse_item(item, optional)
if len(obj) > 0:
qf.append(obj)
if len(qf) > 0:
return reduce(operator.and_, qf)
return Q()
elif key == self._or:
if not isinstance(data, list):
print("Not a list")
return Q()
qf = list()
for item in data:
obj = self._parse_item(item, optional)
if len(obj) > 0:
qf.append(obj)
if len(qf) > 0:
return reduce(operator.or_, qf)
return Q()
elif key == self._not:
obj = self._parse_item(data, optional)
if len(obj) > 0:
return ~obj
else:
pass
return Q()
def _parse_comparision(self, field, key, data, optional):
"""
Returns string value.
"""
return data
def _parse_field(self, field, value, optional):
"""
Returns Q-object.
field - field name
value - field value or expression.
optional - optional parameter.
"""
if isinstance(value, dict):
            key = list(value.keys())[0]
value = self._parse_comparision(field, key, value[key], optional)
return Q((field + '__' + key[1:], value))
else:
return Q((field + '__' + self._iexact[1:], value)) | 28.93617 | 98 | 0.5125 |
d32f740d2dd09a447e5d9691fa5a1af261fa88aa | 8,195 | py | Python | src/chains/tests/test_signals.py | byteflyfunny/safe-config-service | 86af4a0bcfb7538b57218290f72729cbf3e56b78 | [
"MIT"
]
| null | null | null | src/chains/tests/test_signals.py | byteflyfunny/safe-config-service | 86af4a0bcfb7538b57218290f72729cbf3e56b78 | [
"MIT"
]
| null | null | null | src/chains/tests/test_signals.py | byteflyfunny/safe-config-service | 86af4a0bcfb7538b57218290f72729cbf3e56b78 | [
"MIT"
]
| null | null | null | import responses
from django.test import TestCase, override_settings
from chains.models import Feature, Wallet
from chains.tests.factories import ChainFactory, GasPriceFactory
@override_settings(
CGW_URL="http://127.0.0.1",
CGW_FLUSH_TOKEN="example-token",
)
class ChainNetworkHookTestCase(TestCase):
@responses.activate
def test_on_chain_update_hook_200(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=200,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
ChainFactory.create()
assert len(responses.calls) == 1
assert responses.calls[0].request.body == b'{"invalidate": "Chains"}'
assert responses.calls[0].request.url == "http://127.0.0.1/v2/flush"
assert (
responses.calls[0].request.headers.get("Authorization")
== "Basic example-token"
)
@responses.activate
def test_on_chain_update_hook_400(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=400,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
ChainFactory.create()
assert len(responses.calls) == 1
@responses.activate
def test_on_chain_update_hook_500(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=500,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
ChainFactory.create()
assert len(responses.calls) == 1
@responses.activate
def test_on_chain_delete_hook_call(self) -> None:
chain = ChainFactory.create()
chain.delete()
# 2 calls: one for creation and one for deletion
assert len(responses.calls) == 2
@responses.activate
def test_on_chain_update_hook_call(self) -> None:
chain = ChainFactory.create()
# Not updating using queryset because hooks are not triggered that way
chain.currency_name = "Ether"
chain.save()
# 2 calls: one for creation and one for updating
assert len(responses.calls) == 2
@override_settings(
CGW_URL=None,
CGW_FLUSH_TOKEN=None,
)
@responses.activate
def test_on_chain_update_with_no_cgw_set(self) -> None:
ChainFactory.create()
assert len(responses.calls) == 0
@override_settings(
CGW_URL="http://127.0.0.1",
CGW_FLUSH_TOKEN=None,
)
@responses.activate
def test_on_chain_update_with_no_flush_token_set(self) -> None:
ChainFactory.create()
assert len(responses.calls) == 0
@override_settings(
CGW_URL="http://127.0.0.1",
CGW_FLUSH_TOKEN="example-token",
)
class FeatureHookTestCase(TestCase):
@responses.activate
def test_on_feature_create_hook_call(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=200,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
Feature(key="Test Feature").save()
assert len(responses.calls) == 1
assert responses.calls[0].request.body == b'{"invalidate": "Chains"}'
assert responses.calls[0].request.url == "http://127.0.0.1/v2/flush"
assert (
responses.calls[0].request.headers.get("Authorization")
== "Basic example-token"
)
@responses.activate
def test_on_feature_delete_hook_call(self) -> None:
feature = Feature(key="Test Feature")
feature.save() # create
feature.delete() # delete
# 2 calls: one for creation and one for deletion
assert len(responses.calls) == 2
@responses.activate
def test_on_feature_update_hook_call(self) -> None:
feature = Feature(key="Test Feature")
feature.save() # create
feature.key = "New Test Feature"
feature.save() # update
# 2 calls: one for creation and one for updating
assert len(responses.calls) == 2
@override_settings(
CGW_URL="http://127.0.0.1",
CGW_FLUSH_TOKEN="example-token",
)
class WalletHookTestCase(TestCase):
@responses.activate
def test_on_wallet_create_hook_call(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=200,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
Wallet(key="Test Wallet").save()
assert len(responses.calls) == 1
assert responses.calls[0].request.body == b'{"invalidate": "Chains"}'
assert responses.calls[0].request.url == "http://127.0.0.1/v2/flush"
assert (
responses.calls[0].request.headers.get("Authorization")
== "Basic example-token"
)
@responses.activate
def test_on_wallet_delete_hook_call(self) -> None:
wallet = Wallet(key="Test Wallet")
wallet.save() # create
wallet.delete() # delete
# 2 calls: one for creation and one for deletion
assert len(responses.calls) == 2
@responses.activate
def test_on_wallet_update_hook_call(self) -> None:
wallet = Wallet(key="Test Wallet")
wallet.save() # create
wallet.key = "Test Wallet v2"
wallet.save() # update
# 2 calls: one for creation and one for updating
assert len(responses.calls) == 2
@override_settings(
CGW_URL="http://127.0.0.1",
CGW_FLUSH_TOKEN="example-token",
)
class GasPriceHookTestCase(TestCase):
def setUp(self) -> None:
self.chain = (
ChainFactory.create()
) # chain creation: a GasPrice requires a chain
@responses.activate
def test_on_gas_price_create_hook_call(self) -> None:
responses.add(
responses.POST,
"http://127.0.0.1/v2/flush",
status=200,
match=[
responses.matchers.header_matcher(
{"Authorization": "Basic example-token"}
),
responses.matchers.json_params_matcher({"invalidate": "Chains"}),
],
)
GasPriceFactory.create(chain=self.chain)
assert len(responses.calls) == 1
assert responses.calls[0].request.body == b'{"invalidate": "Chains"}'
assert responses.calls[0].request.url == "http://127.0.0.1/v2/flush"
assert (
responses.calls[0].request.headers.get("Authorization")
== "Basic example-token"
)
@responses.activate
def test_on_gas_price_delete_hook_call(self) -> None:
gas_price = GasPriceFactory.create(chain=self.chain) # create
gas_price.delete() # delete
# 2 calls: one for creation and one for deletion
assert len(responses.calls) == 2
@responses.activate
def test_on_gas_price_update_hook_call(self) -> None:
gas_price = GasPriceFactory.create(
chain=self.chain, fixed_wei_value=1000
) # create
gas_price.fixed_wei_value = 2000
gas_price.save() # update
# 2 calls: one for creation and one for updating
assert len(responses.calls) == 2
| 30.464684 | 81 | 0.588408 |
1e0cb873594b42e970958dc96375fe5179d63c7f | 11,297 | py | Python | python_scripts/ensemble_gradient_boosting.py | ph4ge/scikit-learn-mooc | a8d0feded54d987fe88480e7ade7d218be69019e | [
"CC-BY-4.0"
]
| null | null | null | python_scripts/ensemble_gradient_boosting.py | ph4ge/scikit-learn-mooc | a8d0feded54d987fe88480e7ade7d218be69019e | [
"CC-BY-4.0"
]
| null | null | null | python_scripts/ensemble_gradient_boosting.py | ph4ge/scikit-learn-mooc | a8d0feded54d987fe88480e7ade7d218be69019e | [
"CC-BY-4.0"
]
| null | null | null | # %% [markdown]
# # Gradient-boosting decision tree (GBDT)
#
# In this notebook, we will present the gradient boosting decision tree
# algorithm and contrast it with AdaBoost.
#
# Gradient-boosting differs from AdaBoost due to the following reason: instead
# of assigning weights to specific samples, GBDT will fit a decision tree on
# the residuals error (hence the name "gradient") of the previous tree.
# Therefore, each new tree in the ensemble predicts the error made by the
# previous learner instead of predicting the target directly.
#
# In this section, we will provide some intuition about the way learners are
# combined to give the final prediction. In this regard, let's go back to our
# regression problem which is more intuitive for demonstrating the underlying
# machinery.
# %%
import pandas as pd
import numpy as np
# Create a random number generator that will be used to set the randomness
rng = np.random.RandomState(0)
def generate_data(n_samples=50):
"""Generate synthetic dataset. Returns `data_train`, `data_test`,
`target_train`."""
x_max, x_min = 1.4, -1.4
len_x = x_max - x_min
x = rng.rand(n_samples) * len_x - len_x / 2
noise = rng.randn(n_samples) * 0.3
y = x ** 3 - 0.5 * x ** 2 + noise
data_train = pd.DataFrame(x, columns=["Feature"])
data_test = pd.DataFrame(np.linspace(x_max, x_min, num=300),
columns=["Feature"])
target_train = pd.Series(y, name="Target")
return data_train, data_test, target_train
data_train, data_test, target_train = generate_data()
# %%
import matplotlib.pyplot as plt
import seaborn as sns
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
_ = plt.title("Synthetic regression dataset")
# %% [markdown]
# As we previously discussed, boosting will be based on assembling a sequence
# of learners. We will start by creating a decision tree regressor. We will set
# the depth of the tree so that the resulting learner will underfit the data.
# %%
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(max_depth=3, random_state=0)
tree.fit(data_train, target_train)
target_train_predicted = tree.predict(data_train)
target_test_predicted = tree.predict(data_test)
# %% [markdown]
# The term "test" here refers to data that was not used for training.
# It should not be confused with data coming from a train-test split, as it
# was generated in equally-spaced intervals for the visual evaluation of the
# predictions.
# %%
# plot the data
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
# plot the predictions
line_predictions = plt.plot(data_test, target_test_predicted, "--")
# plot the residuals
for value, true, predicted in zip(data_train["Feature"],
target_train,
target_train_predicted):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
plt.legend([line_predictions[0], lines_residuals[0]],
["Fitted tree", "Residuals"])
_ = plt.title("Prediction function together \nwith errors on the training set")
# %% [markdown]
# ```{tip}
# In the cell above, we manually edited the legend to get only a single label
# for all the residual lines.
# ```
# Since the tree underfits the data, its accuracy is far from perfect on the
# training data. We can observe this in the figure by looking at the difference
# between the predictions and the ground-truth data. We represent these errors,
# called "Residuals", by unbroken red lines.
#
# Indeed, our initial tree was not expressive enough to handle the complexity
# of the data, as shown by the residuals. In a gradient-boosting algorithm, the
# idea is to create a second tree which, given the same data `data`, will try
# to predict the residuals instead of the vector `target`. We would therefore
# have a tree that is able to predict the errors made by the initial tree.
#
# Let's train such a tree.
# %%
residuals = target_train - target_train_predicted
tree_residuals = DecisionTreeRegressor(max_depth=5, random_state=0)
tree_residuals.fit(data_train, residuals)
target_train_predicted_residuals = tree_residuals.predict(data_train)
target_test_predicted_residuals = tree_residuals.predict(data_test)
# %%
sns.scatterplot(x=data_train["Feature"], y=residuals, color="black", alpha=0.5)
line_predictions = plt.plot(data_test, target_test_predicted_residuals, "--")
# plot the residuals of the predicted residuals
for value, true, predicted in zip(data_train["Feature"],
residuals,
target_train_predicted_residuals):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
plt.legend([line_predictions[0], lines_residuals[0]],
["Fitted tree", "Residuals"])
_ = plt.title("Prediction of the previous residuals")
# %% [markdown]
# We see that this new tree only manages to fit some of the residuals. We will
# focus on a specific sample from the training set (i.e. we know that the
# sample will be well predicted using two successive trees). We will use this
# sample to explain how the predictions of both trees are combined. Let's first
# select this sample in `data_train`.
# %%
data_max = data_train.iloc[-2, 0]
target_true = target_train.iloc[-2]
target_true_residual = residuals.iloc[-2]
# %% [markdown]
# Let's plot the previous information and highlight our sample of interest.
# Let's start by plotting the original data and the prediction of the first
# decision tree.
# %%
# Plot the previous information:
# * the dataset
# * the predictions
# * the residuals
sns.scatterplot(x=data_train["Feature"], y=target_train, color="black",
alpha=0.5)
plt.plot(data_test, target_test_predicted, "--")
for value, true, predicted in zip(data_train["Feature"],
target_train,
target_train_predicted):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
# Highlight the sample of interest
plt.scatter(data_max, target_true, label="Sample of interest",
color="tab:orange", s=200)
plt.xlim([-1, 0])
plt.legend()
_ = plt.title("Tree predictions")
# %% [markdown]
# Now, let's plot the residuals information. We will plot the residuals
# computed from the first decision tree and show the residual predictions.
# %%
# Plot the previous information:
# * the residuals committed by the first tree
# * the residual predictions
# * the residuals of the residual predictions
sns.scatterplot(x=data_train["Feature"], y=residuals,
color="black", alpha=0.5)
plt.plot(data_test, target_test_predicted_residuals, "--")
for value, true, predicted in zip(data_train["Feature"],
residuals,
target_train_predicted_residuals):
lines_residuals = plt.plot([value, value], [true, predicted], color="red")
# Highlight the sample of interest
plt.scatter(data_max, target_true_residual, label="Sample of interest",
color="tab:orange", s=200)
plt.xlim([-1, 0])
plt.legend()
_ = plt.title("Prediction of the residuals")
# %% [markdown]
# For our sample of interest, our initial tree is making an error (small
# residual). When fitting the second tree, the residual in this case is
# perfectly fitted and predicted. We will quantitatively check this prediction
# using the fitted tree. First, let's check the prediction of the initial tree
# and compare it with the true value.
# %%
print(f"True value to predict for f(x={data_max:.3f}) = {target_true:.3f}")
y_pred_first_tree = tree.predict([[data_max]])[0]
print(f"Prediction of the first decision tree for x={data_max:.3f}: "
f"y={y_pred_first_tree:.3f}")
print(f"Error of the tree: {target_true - y_pred_first_tree:.3f}")
# %% [markdown]
# As we visually observed, we have a small error. Now, we can use the second
# tree to try to predict this residual.
# %%
print(f"Prediction of the residual for x={data_max:.3f}: "
f"{tree_residuals.predict([[data_max]])[0]:.3f}")
# %% [markdown]
# We see that our second tree is capable of predicting the exact residual
# (error) of our first tree. Therefore, we can predict the value of `x` by
# summing the predictions of all the trees in the ensemble.
# %%
y_pred_first_and_second_tree = (
y_pred_first_tree + tree_residuals.predict([[data_max]])[0]
)
print(f"Prediction of the first and second decision trees combined for "
f"x={data_max:.3f}: y={y_pred_first_and_second_tree:.3f}")
print(f"Error of the tree: {target_true - y_pred_first_and_second_tree:.3f}")
# %% [markdown]
# We chose a sample for which only two trees were enough to make the perfect
# prediction. However, we saw in the previous plot that two trees were not
# enough to correct the residuals of all samples. Therefore, one needs to
# add several trees to the ensemble to successfully correct the error.
# (i.e. the second tree corrects the first tree's error, while the third tree
# corrects the second tree's error and so on.)
#
# We will compare the generalization performance of random-forest and gradient
# boosting on the California housing dataset.
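
# %% [markdown]
# Before running that comparison, the cell below sketches this sequential
# procedure by hand on our synthetic dataset (as an illustration only, it is
# not part of the comparison itself): at each step a new shallow tree is fit
# on the residuals of the current ensemble, and the ensemble prediction is the
# sum of the predictions of all trees fitted so far. The variable names used
# here exist only for this sketch.

# %%
# Illustrative sketch: fit each new tree on the residuals of the running ensemble
sequence_of_trees = []
running_prediction = np.zeros(len(target_train))
for _ in range(5):
    residuals_step = target_train - running_prediction
    tree_step = DecisionTreeRegressor(max_depth=3, random_state=0)
    tree_step.fit(data_train, residuals_step)
    sequence_of_trees.append(tree_step)
    running_prediction += tree_step.predict(data_train)

print(f"Sum of squared residuals after {len(sequence_of_trees)} trees: "
      f"{((target_train - running_prediction) ** 2).sum():.3f}")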
# %%
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import cross_validate
data, target = fetch_california_housing(return_X_y=True, as_frame=True)
target *= 100 # rescale the target in k$
# %%
from sklearn.ensemble import GradientBoostingRegressor
gradient_boosting = GradientBoostingRegressor(n_estimators=200)
cv_results_gbdt = cross_validate(
gradient_boosting, data, target, scoring="neg_mean_absolute_error",
n_jobs=2,
)
# %%
print("Gradient Boosting Decision Tree")
print(f"Mean absolute error via cross-validation: "
f"{-cv_results_gbdt['test_score'].mean():.3f} +/- "
f"{cv_results_gbdt['test_score'].std():.3f} k$")
print(f"Average fit time: "
f"{cv_results_gbdt['fit_time'].mean():.3f} seconds")
print(f"Average score time: "
f"{cv_results_gbdt['score_time'].mean():.3f} seconds")
# %%
from sklearn.ensemble import RandomForestRegressor
random_forest = RandomForestRegressor(n_estimators=200, n_jobs=2)
cv_results_rf = cross_validate(
random_forest, data, target, scoring="neg_mean_absolute_error",
n_jobs=2,
)
# %%
print("Random Forest")
print(f"Mean absolute error via cross-validation: "
f"{-cv_results_rf['test_score'].mean():.3f} +/- "
f"{cv_results_rf['test_score'].std():.3f} k$")
print(f"Average fit time: "
f"{cv_results_rf['fit_time'].mean():.3f} seconds")
print(f"Average score time: "
f"{cv_results_rf['score_time'].mean():.3f} seconds")
# %% [markdown]
# In terms of computational performance, the forest can be parallelized and will
# benefit from using multiple cores of the CPU. In terms of scoring
# performance, both algorithms lead to very close results.
#
# However, we see that the gradient boosting is a very fast algorithm to
# predict compared to random forest. This is due to the fact that gradient
# boosting uses shallow trees. We will go into details in the next notebook
# about the hyperparameters to consider when optimizing ensemble methods.
| 38.294915 | 79 | 0.717005 |
0e416f250bed301a221befa2e791d36dcfc51c4a | 865 | py | Python | ducktape/utils/local_filesystem_utils.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
]
| null | null | null | ducktape/utils/local_filesystem_utils.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
]
| null | null | null | ducktape/utils/local_filesystem_utils.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | [
"Apache-2.0"
]
| null | null | null | # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
def mkdir_p(path):
"""mkdir -p functionality.
:type path: str
"""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 28.833333 | 74 | 0.692486 |
12c8a888330416de78848e233a2e387cecba70f5 | 1,366 | py | Python | utilities/get_client.py | LaudateCorpus1/aruba-fabric-composer-companion | 500bf4153e4658503ea7765ec8cec39847546b8b | [
"Apache-2.0"
]
| null | null | null | utilities/get_client.py | LaudateCorpus1/aruba-fabric-composer-companion | 500bf4153e4658503ea7765ec8cec39847546b8b | [
"Apache-2.0"
]
| null | null | null | utilities/get_client.py | LaudateCorpus1/aruba-fabric-composer-companion | 500bf4153e4658503ea7765ec8cec39847546b8b | [
"Apache-2.0"
]
| 1 | 2022-02-18T07:09:49.000Z | 2022-02-18T07:09:49.000Z | # -*-coding: utf-8 -*-
# (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "[email protected]"
from mongoengine import Q
from database.companion import Companion
from pyhpecfm.client import CFMClient
def access_client():
# Get user informaation.
creds=Companion.objects.first()
username=creds.user.encode('utf-8')
ipaddress=creds.ipaddress.encode('utf-8')
password=creds.passwd.encode('utf-8')
try:
# Create client connection
client=CFMClient(ipaddress,username,password)
client.connect()
except:
        error='Failed to obtain a client connection to the CFM controller.'
return error
return client
| 31.767442 | 74 | 0.72694 |
9ed0d9bc5f5dba837ce326747709b75d4d14c8da | 14,854 | py | Python | source-code/real_single_data_processing.py | asvspoof/ASVspoof2019_system | aaf8ec546e1c9154594bfc93c96b6c4f91f26322 | [
"MIT"
]
| 22 | 2020-02-26T04:00:18.000Z | 2021-12-30T10:58:08.000Z | source-code/real_single_data_processing.py | asvspoof/D3M | b85d256d564e93806c4cadc2257863b4f1b0f588 | [
"MIT"
]
| 13 | 2020-03-02T10:30:23.000Z | 2022-01-13T02:19:11.000Z | source-code/real_single_data_processing.py | asvspoof/D3M | b85d256d564e93806c4cadc2257863b4f1b0f588 | [
"MIT"
]
| 4 | 2020-10-25T12:01:52.000Z | 2020-12-23T08:56:23.000Z | import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from myexp import ex
def get_utt_list(_dir_dataset):
l_utt = []
for root, dirs, files in os.walk(_dir_dataset):
for f in files:
if os.path.splitext(f)[1] == ".npy":
l_utt.append(f.split('.')[0])
return l_utt
def get_utt_and_label_dict(pathToMeta):
l_utt = []
d_meta = {}
with open(pathToMeta, 'r') as f:
l_meta = f.readlines()
for line in l_meta:
_, key, _, _, label = line.strip().split(' ')
d_meta[key] = 1 if label == 'bonafide' else 0
l_utt.append(key)
return l_utt, d_meta
def get_utt_and_label_dict_for_real(pathToMeta):
l_utt = []
d_meta = {}
with open(pathToMeta, 'r') as f:
l_meta = f.readlines()
for line in l_meta:
# print(line)
_, key, _, _, _, _, _, _, _, _, _, _, label = line.strip().split('\t')
d_meta[key] = 1 if label == 'bonafide' else 0
l_utt.append(key)
return l_utt, d_meta
def get_utt_and_label_dict_for_PA_system(pathToMeta):
l_utt = []
d_meta = {}
with open(pathToMeta, 'r') as f:
l_meta = f.readlines()
for line in l_meta:
# print(line)
_, key, _, label, _ = line.strip().split(' ')
if label == '-': # bonafide
d_meta[key] = 0
elif label == 'AA':
d_meta[key] = 1
elif label == 'AB':
d_meta[key] = 2
elif label == 'AC':
d_meta[key] = 3
elif label == 'BA':
d_meta[key] = 4
elif label == 'BB':
d_meta[key] = 5
elif label == 'BC':
d_meta[key] = 6
elif label == 'CA':
d_meta[key] = 7
elif label == 'CB':
d_meta[key] = 8
elif label == 'CC':
d_meta[key] = 9
else:
raise NotImplementedError()
l_utt.append(key)
return l_utt, d_meta
def get_utt_and_label_dict_for_Env(pathToMeta):
l_utt = []
d_meta = {}
env_meta = {}
with open(pathToMeta, 'r') as f:
l_meta = f.readlines()
for line in l_meta:
# print(line)
_, key, EnvID, _, label = line.strip().split(' ')
if EnvID not in env_meta:
env_meta[EnvID] = len(env_meta)
print(env_meta)
d_meta[key] = 1 if label == 'bonafide' else 0
l_utt.append(key)
return l_utt, d_meta, env_meta
def split_genu_spoof(l_in, dir_meta, return_dic_meta=False):
l_gen, l_spo = [], []
d_meta = {}
with open(dir_meta, 'r') as f:
l_meta = f.readlines()
for line in l_meta:
_, key, _, _, label = line.strip().split(' ')
d_meta[key] = 1 if label == 'bonafide' else 0
for k in d_meta.keys():
if d_meta[k] == 1:
l_gen.append(k)
else:
l_spo.append(k)
if return_dic_meta:
return l_gen, l_spo, d_meta
else:
return l_gen, l_spo
def balance_classes(lines_small, lines_big, np_seed):
'''
    Balance the number of samples per class by downsampling the larger class.
    Designed for binary (two-class) classification.
    :param lines_small: list of utterance keys from the minority class
    :param lines_big: list of utterance keys from the majority class
    :param np_seed: seed used for numpy's random shuffling
    :return: shuffled list containing all minority-class keys plus an equally
        sized random subset of the majority-class keys
'''
len_small_lines = len(lines_small)
len_big_lines = len(lines_big)
idx_big = list(range(len_big_lines))
np.random.seed(np_seed)
np.random.shuffle(lines_big)
new_lines = lines_small + lines_big[:len_small_lines]
np.random.shuffle(new_lines)
# print(new_lines[:5])
return new_lines
class Dataset_ASVspoof2019_PA(Dataset):
def __init__(self, list_IDs, labels, nb_time, base_dir, preload=False):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_time : integer, the number of timesteps for each mini-batch
'''
self.list_IDs = list_IDs
self.labels = labels
self.nb_time = nb_time
self.base_dir = base_dir
self.audios = None
if preload:
self._preload()
def _preload(self):
self.audios = []
'''
Preload dataset to memory
:return:
'''
for id in self.list_IDs:
self.audios.append(np.load(os.path.join(self.base_dir, id + '.npy')))
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
if self.audios is not None:
# if dataset is preloaded
X = self.audios[index]
else:
X = np.load(os.path.join(self.base_dir, ID + '.npy'))
# print(X.shape)>>> (1, time, freq)
nb_time = X.shape[1]
if nb_time > self.nb_time:
start_idx = np.random.randint(0, nb_time - self.nb_time)
X = X[:, start_idx:start_idx + self.nb_time, :]
elif nb_time < self.nb_time:
nb_dup = self.nb_time // nb_time + 1
X = np.tile(X, (1, nb_dup, 1))[:, :self.nb_time, :]
return X, self.labels[ID]
class Dataset_ASVspoof2019_PA_Multi_Task(Dataset):
def __init__(self, list_IDs, target1, target2, nb_time, base_dir, preload=False):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_time : integer, the number of timesteps for each mini-batch
'''
self.list_IDs = list_IDs
self.target1 = target1
self.target2 = target2
self.nb_time = nb_time
self.base_dir = base_dir
self.audios = None
if preload:
self._preload()
def _preload(self):
self.audios = []
'''
Preload dataset to memory
:return:
'''
for id in self.list_IDs:
self.audios.append(np.load(os.path.join(self.base_dir, id + '.npy')))
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
if self.audios is not None:
# if dataset is preloaded
X = self.audios[index]
else:
X = np.load(os.path.join(self.base_dir, ID + '.npy'))
# print(X.shape)>>> (1, time, freq)
nb_time = X.shape[1]
if nb_time > self.nb_time:
start_idx = np.random.randint(0, nb_time - self.nb_time)
X = X[:, start_idx:start_idx + self.nb_time, :]
elif nb_time < self.nb_time:
nb_dup = self.nb_time // nb_time + 1
X = np.tile(X, (1, nb_dup, 1))[:, :self.nb_time, :]
return X, self.target1[ID], self.target2[ID]
#
# class data_prefetcher(object):
# def __init__(self, loader):
# self.loader = iter(loader)
# self.stream = torch.cuda.Stream()
# # self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
# # self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# # With Amp, it isn't necessary to manually convert data to half.
# # if args.fp16:
# # self.mean = self.mean.half()
# # self.std = self.std.half()
# self.preload()
#
# def preload(self):
# try:
# self.next_input, self.next_target1, self.next_target2 = next(self.loader)
# except StopIteration:
# self.next_input = None
# self.next_target1 = None
# self.next_target2 = None
# return
# with torch.cuda.stream(self.stream):
# self.next_input = self.next_input.cuda(non_blocking=True)
# self.next_target1 = self.next_target1.cuda(non_blocking=True)
# self.next_target2 = self.next_target2.cuda(non_blocking=True)
# # With Amp, it isn't necessary to manually convert data to half.
# # if args.fp16:
# # self.next_input = self.next_input.half()
# # else:
# # self.next_input = self.next_input.float()
# # self.next_input = self.next_input.sub_(self.mean).div_(self.std)
#
# def next(self):
# torch.cuda.current_stream().wait_stream(self.stream)
# input = self.next_input
# target1 = self.next_target1
# target2 = self.next_target2
# self.preload()
# return input, target1, target2
#
# def __iter__(self):
# return self
class data_prefetcher(object):
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
# self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
# self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
# self.next_input = self.next_input.float()
# self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
def __iter__(self):
return self
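
# Usage sketch (illustrative only, not called anywhere in this module): the
# prefetcher wraps an existing DataLoader so that host-to-device copies run on
# a separate CUDA stream and overlap with computation on the default stream.
#
#   prefetcher = data_prefetcher(train_loader)
#   batch, labels = prefetcher.next()
#   while batch is not None:
#       ...  # forward/backward pass using `batch` and `labels`
#       batch, labels = prefetcher.next()
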
@ex.capture
def data_loader(train_batch, dev_batch, num_workers, nb_time, train_dir, dev_dir, eval_dir, trainProtocolFile,
devProtocolFile,
evalProtocolFile):
# get 4 utt_lists
# list_trn = get_utt_list(train_dir)
# list_dev = get_utt_list(dev_dir)
# list_eval = get_utt_list(eval_dir)
#
# l_gen_trn, l_spo_trn, d_label_trn = split_genu_spoof(l_in=list_trn, dir_meta=trainProtocolFile,
# return_dic_meta=True)
# l_gen_dev, l_spo_dev, d_label_dev = split_genu_spoof(l_in=list_dev, dir_meta=devProtocolFile,
# return_dic_meta=True)
# l_gen_eval, l_spo_eval, d_label_eval = split_genu_spoof(l_in=list_eval, dir_meta=evalProtocolFile,
# return_dic_meta=True)
# del list_trn, list_dev, list_eval
# Update 2019-7-14: Using weighted CrossEntropyLoss
# which is particularly useful when you have an unbalanced training set.
# # get balanced validation utterance list.
# if len(l_gen_trn) > len(l_spo_trn):
# l_train_utt = balance_classes(l_spo_trn, l_gen_trn, np_seed=0)
# else:
# l_train_utt = balance_classes(l_gen_trn, l_spo_trn, np_seed=0)
# if len(l_gen_dev) > len(l_spo_dev):
# l_dev_utt = balance_classes(l_spo_dev, l_gen_dev, np_seed=0)
# else:
# l_dev_utt = balance_classes(l_gen_dev, l_spo_dev, np_seed=0)
# if len(l_gen_eval) > len(l_spo_eval):
# l_eval_utt = balance_classes(l_spo_eval, l_gen_eval, np_seed=0)
# else:
# l_eval_utt = balance_classes(l_gen_eval, l_spo_eval, np_seed=0)
# del l_gen_trn, l_spo_trn, l_gen_dev, l_spo_dev, l_gen_eval, l_spo_eval
# define dataset generators
# l_gen_trn.extend(l_spo_trn)
# l_trn = l_gen_trn
# del l_spo_trn
# l_gen_dev.extend(l_spo_dev)
# l_dev = l_gen_dev
# del l_spo_dev
# l_gen_eval.extend(l_spo_eval)
# l_eval = l_gen_eval
# del l_spo_eval
l_trn, d_label_trn = get_utt_and_label_dict(trainProtocolFile)
_, d_label2_trn = get_utt_and_label_dict_for_PA_system(trainProtocolFile)
l_dev, d_label_dev = get_utt_and_label_dict(devProtocolFile)
_, d_label2_dev = get_utt_and_label_dict_for_PA_system(devProtocolFile)
# l_eval, d_label_eval = get_utt_and_label_dict(evalProtocolFile)
# _, d_label2_eval = get_utt_and_label_dict_for_PA_system(evalProtocolFile)
l_eval, d_label_eval = get_utt_and_label_dict_for_real(evalProtocolFile)
trainset = Dataset_ASVspoof2019_PA(list_IDs=l_trn,
labels=d_label_trn,
nb_time=nb_time,
base_dir=train_dir)
train_loader = DataLoader(trainset,
batch_size=train_batch,
shuffle=True,
num_workers=num_workers,
pin_memory=True
)
devset = Dataset_ASVspoof2019_PA(list_IDs=l_dev,
labels=d_label_dev,
nb_time=nb_time,
base_dir=dev_dir,
preload=False)
dev_loader = DataLoader(devset,
batch_size=dev_batch,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
evalset = Dataset_ASVspoof2019_PA(list_IDs=l_eval,
labels=d_label_eval,
nb_time=nb_time,
base_dir=eval_dir,
preload=False)
eval_loader = DataLoader(evalset,
batch_size=dev_batch,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
return train_loader, dev_loader, eval_loader
if __name__ == '__main__':
l_utt = []
d_meta = {}
# l_utt,d_meta= get_utt_and_label_dict("/home/student/dyq/anti-spoofing/ASVspoof2019/ASVspoof2019_PA_real/ASVspoof2019_PA_cm_protocols/ASVspoof2019.PA.real.cm.eval.trl.txt")
l_utt, d_meta = get_utt_and_label_dict_for_real(
"/home/student/dyq/anti-spoofing/ASVspoof2019/ASVspoof2019_PA_real/ASVspoof2019_PA_cm_protocols/ASVspoof2019.PA.real.cm.eval.trl.txt")
l_utt, d_meta = get_utt_and_label_dict_for_PA_system(
"/home/student/dyq/anti-spoofing/ASVspoof2019/PA/ASVspoof2019_PA_cm_protocols/ASVspoof2019.PA.cm.train.trn.txt")
for index in range(len(l_utt)):
print(l_utt[index] + " " + str(d_meta[l_utt[index]]))
| 34.868545 | 177 | 0.575737 |
e9b06c309f9a35f769774d881ec3acf099e6c2ac | 1,178 | py | Python | capablerobot_camera/deserializers/__init__.py | CapableRobot/CapableRobot_Camera_Python | 3880e04e243ad21783c6a67563d83519a23d3eb4 | [
"MIT"
]
| null | null | null | capablerobot_camera/deserializers/__init__.py | CapableRobot/CapableRobot_Camera_Python | 3880e04e243ad21783c6a67563d83519a23d3eb4 | [
"MIT"
]
| null | null | null | capablerobot_camera/deserializers/__init__.py | CapableRobot/CapableRobot_Camera_Python | 3880e04e243ad21783c6a67563d83519a23d3eb4 | [
"MIT"
]
| null | null | null | # The MIT License (MIT)
#
# Copyright (c) 2019 Chris Osterwood for Capable Robot Components
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .max9286 import MAX9286 | 51.217391 | 79 | 0.780136 |
87f08808a1a1d313a68f6399ee93d2e7ec22b8e4 | 5,264 | py | Python | otter/assign/cell_generators.py | nalderto/otter-grader | a4714bf48df07b7eb8b3c41530ce7a778fd42c98 | [
"BSD-3-Clause"
]
| null | null | null | otter/assign/cell_generators.py | nalderto/otter-grader | a4714bf48df07b7eb8b3c41530ce7a778fd42c98 | [
"BSD-3-Clause"
]
| null | null | null | otter/assign/cell_generators.py | nalderto/otter-grader | a4714bf48df07b7eb8b3c41530ce7a778fd42c98 | [
"BSD-3-Clause"
]
| null | null | null | """
Miscellaneous cell generators for Otter Assign
"""
import copy
import nbformat
from .constants import MD_RESPONSE_CELL_SOURCE
from .utils import get_source, lock
def gen_init_cell():
"""
Generates a cell to initialize Otter in the notebook. The code cell has the following contents:
.. code-block:: python
# Initialize Otter
import otter
grader = otter.Notebook()
Returns:
``nbformat.NotebookNode``: the init cell
"""
cell = nbformat.v4.new_code_cell("# Initialize Otter\nimport otter\ngrader = otter.Notebook()")
lock(cell)
return cell
def gen_markdown_response_cell():
"""
Generates a Markdown response cell with the following contents:
.. code-block:: markdown
_Type your answer here, replacing this text._
Returns:
``nbformat.NotebookNode``: the response cell
"""
return nbformat.v4.new_markdown_cell(MD_RESPONSE_CELL_SOURCE)
def gen_export_cells(instruction_text, assignment, pdf=True, filtering=True):
"""
Generates export cells that instruct the student the run a code cell calling
``otter.Notebook.export`` to generate and download their submission. The Markdown cell contains:
.. code-block:: markdown
## Submission
Make sure you have run all cells in your notebook in order before running the cell below, so
that all images/graphs appear in the output. The cell below will generate a zipfile for you
to submit. **Please save before exporting!**
Additional instructions can be appended to this cell by passing a string to ``instruction_text``.
The code cell contains:
.. code-block:: python
# Save your notebook first, then run this cell to export your submission.
grader.export()
The call to ``grader.export()`` contains different arguments based on the values passed to ``pdf``
and ``filtering``.
Args:
instruction_text (``str``): extra instructions for students when exporting
assignment (``otter.assign.assignment.Assignment``): the assignment configurations
pdf (``bool``, optional): whether a PDF is needed
filtering (``bool``, optional): whether PDF filtering is needed
Returns:
``list`` of ``nbformat.NotebookNode``: generated export cells
"""
instructions = nbformat.v4.new_markdown_cell()
instructions.source = "## Submission\n\nMake sure you have run all cells in your notebook in order before " \
"running the cell below, so that all images/graphs appear in the output. The cell below will generate " \
"a zip file for you to submit. **Please save before exporting!**"
if instruction_text:
instructions.source += '\n\n' + instruction_text
export = nbformat.v4.new_code_cell()
source_lines = ["# Save your notebook first, then run this cell to export your submission."]
if filtering and pdf:
source_lines.append(f"grader.export(\"{assignment.master}\")")
elif not filtering:
source_lines.append(f"grader.export(\"{assignment.master}\", filtering=False)")
else:
source_lines.append(f"grader.export(\"{assignment.master}\", pdf=False)")
export.source = "\n".join(source_lines)
lock(instructions)
lock(export)
return [instructions, export, nbformat.v4.new_markdown_cell(" ")] # last cell is buffer
def gen_check_all_cell():
"""
Generates a check-all cell and a Markdown cell with instructions to run all tests in the notebook.
The Markdown cell has the following contents:
.. code-block:: markdown
---
To double-check your work, the cell below will rerun all of the autograder tests.
The code cell has the following contents:
.. code-block:: python
grader.check_all()
Returns:
``list`` of ``nbformat.NotebookNode``: generated check-all cells
"""
instructions = nbformat.v4.new_markdown_cell()
instructions.source = "---\n\nTo double-check your work, the cell below will rerun all of the autograder tests."
check_all = nbformat.v4.new_code_cell("grader.check_all()")
lock(instructions)
lock(check_all)
return [instructions, check_all]
def gen_close_export_cell():
"""
Generates a Markdown cell to end question export for PDF filtering. The cell contains:
.. code-block:: markdown
<!-- END QUESTION -->
Returns:
``nbformat.NotebookNode``: new Markdown cell with ``<!-- END QUESTION -->``
"""
cell = nbformat.v4.new_markdown_cell("<!-- END QUESTION -->")
lock(cell)
return cell
def add_close_export_to_cell(cell):
"""Adds an HTML comment to close question export for PDF filtering to the top of ``cell``. ``cell``
should be a Markdown cell. This adds ``<!-- END QUESTION-->`` as the first line of the cell.
Args:
cell (``nbformat.NotebookNode``): the cell to add the close export to
Returns:
``nbformat.NotebookNode``: the cell with the close export comment at the top
"""
cell = copy.deepcopy(cell)
source = get_source(cell)
source = ["<!-- END QUESTION -->\n", "\n"] + source
cell['source'] = "\n".join(source)
return cell
| 33.106918 | 116 | 0.669073 |
0cf7f15e5fe624698fdca9751b312187a4999a64 | 681 | py | Python | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/__init__.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
]
| 5 | 2022-01-30T07:35:58.000Z | 2022-02-08T05:45:20.000Z | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/__init__.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
]
| 1 | 2022-01-14T02:33:28.000Z | 2022-01-14T02:33:28.000Z | Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/samplers/__init__.py | linuxonly801/awesome-DeepLearning | b063757fa130c4d56aea5cce2e592610f1e169f9 | [
"Apache-2.0"
]
| 1 | 2022-01-24T16:27:01.000Z | 2022-01-24T16:27:01.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .random_sampler import RandomSampler
__all__ = ['RandomSampler']
| 37.833333 | 74 | 0.767988 |
2b515f6bcef12b20e05b266be9e86dc8353738a1 | 5,397 | py | Python | mmaction/datasets/extra_aug.py | evgeny-izutov/mmaction | c92d29af87f2373641e2d29bdb14705939fc5423 | [
"Apache-2.0"
]
| 1 | 2021-03-13T13:07:39.000Z | 2021-03-13T13:07:39.000Z | mmaction/datasets/extra_aug.py | evgeny-izutov/mmaction | c92d29af87f2373641e2d29bdb14705939fc5423 | [
"Apache-2.0"
]
| null | null | null | mmaction/datasets/extra_aug.py | evgeny-izutov/mmaction | c92d29af87f2373641e2d29bdb14705939fc5423 | [
"Apache-2.0"
]
| 1 | 2021-03-13T13:08:03.000Z | 2021-03-13T13:08:03.000Z | import mmcv
import numpy as np
class PhotoMetricDistortion(object):
def __init__(self, brightness_range=None, contrast_range=None, saturation_range=None,
hue_delta=None, noise_sigma=None, color_scale=None):
self.brightness_lower, self.brightness_upper =\
brightness_range if brightness_range is not None else (None, None)
self.contrast_lower, self.contrast_upper =\
contrast_range if contrast_range is not None else (None, None)
self.saturation_lower, self.saturation_upper =\
saturation_range if saturation_range is not None else (None, None)
self.hue_delta = hue_delta if hue_delta is not None else None
self.noise_sigma = noise_sigma if noise_sigma is not None else None
self.color_scale_lower, self.color_scale_upper = color_scale if color_scale is not None else (None, None)
@property
def _with_brightness(self):
return self.brightness_lower is not None and self.brightness_upper is not None
@property
def _with_contrast(self):
return self.contrast_lower is not None and self.contrast_upper is not None
@property
def _with_saturation(self):
return self.saturation_lower is not None and self.saturation_upper is not None
@property
def _with_hue(self):
return self.hue_delta is not None
@property
def _with_noise(self):
return self.noise_sigma is not None
@property
def _with_color_scale(self):
return self.color_scale_lower is not None and self.color_scale_upper is not None
@staticmethod
def _augm(img, brightness_delta, contrast_mode, contrast_alpha, saturation_alpha,
hue_delta, noise_sigma, color_scales):
def _clamp_image(_img):
_img[_img < 0.0] = 0.0
_img[_img > 255.0] = 255.0
return _img
img = img.astype(np.float32)
# random brightness
if brightness_delta is not None:
img += brightness_delta
img = _clamp_image(img)
# random contrast
        # mode == 1 --> do random contrast first (before the HSV adjustments below)
        # mode == 0 --> do random contrast last (after the HSV adjustments below)
if contrast_mode == 1:
if contrast_alpha is not None:
img *= contrast_alpha
img = _clamp_image(img)
# convert color from BGR to HSV
if saturation_alpha is not None or hue_delta is not None:
img = mmcv.bgr2hsv(img / 255.)
# random saturation
if saturation_alpha is not None:
img[:, :, 1] *= saturation_alpha
img[:, :, 1][img[:, :, 1] > 1.0] = 1.0
img[:, :, 1][img[:, :, 1] < 0.0] = 0.0
# random hue
if hue_delta is not None:
img[:, :, 0] += hue_delta
img[:, :, 0][img[:, :, 0] > 360.0] -= 360.0
img[:, :, 0][img[:, :, 0] < 0.0] += 360.0
# convert color from HSV to BGR
if saturation_alpha is not None or hue_delta is not None:
img = mmcv.hsv2bgr(img) * 255.
# random contrast
if contrast_mode == 0:
if contrast_alpha is not None:
img *= contrast_alpha
img = _clamp_image(img)
if color_scales is not None:
img *= color_scales.reshape((1, 1, -1))
# gaussian noise
if noise_sigma is not None:
img += np.random.normal(loc=0.0, scale=noise_sigma, size=img.shape)
# clamp
img = _clamp_image(img)
return img.astype(np.uint8)
def __call__(self, img_group):
if self._with_brightness and np.random.randint(2):
images_mean_brightness = [np.mean(img) for img in img_group]
image_brightness = np.random.choice(images_mean_brightness)
brightness_delta_limits = [self.brightness_lower - image_brightness,
self.brightness_upper - image_brightness]
if image_brightness < self.brightness_lower:
brightness_delta_limits[0] = 0.0
elif image_brightness > self.brightness_upper:
brightness_delta_limits[1] = 0.0
brightness_delta = np.random.uniform(brightness_delta_limits[0], brightness_delta_limits[1])
else:
brightness_delta = None
contrast_mode = np.random.randint(2)
contrast_alpha = np.random.uniform(self.contrast_lower, self.contrast_upper) \
if self._with_contrast and np.random.randint(2) else None
saturation_alpha = np.random.uniform(self.saturation_lower, self.saturation_upper) \
if self._with_saturation and np.random.randint(2) else None
hue_delta = np.random.uniform(-self.hue_delta, self.hue_delta)\
if self._with_hue and np.random.randint(2) else None
noise_sigma = np.random.uniform(self.noise_sigma[0], self.noise_sigma[1])\
if self._with_noise and np.random.randint(2) else None
color_scales = np.random.uniform(self.color_scale_lower, self.color_scale_upper, size=3)\
if self._with_color_scale and np.random.randint(2) else None
img_group = [self._augm(img, brightness_delta, contrast_mode, contrast_alpha,
saturation_alpha, hue_delta, noise_sigma, color_scales)
for img in img_group]
return img_group
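# --- Usage sketch (added for illustration; not part of the original mmaction file) ---
# A minimal example of applying PhotoMetricDistortion to a short clip of frames,
# assuming mmcv is installed and the frames are uint8 BGR images. The parameter
# values below are arbitrary illustrative choices, not recommended settings.
if __name__ == '__main__':
    np.random.seed(0)
    frames = [np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8) for _ in range(4)]
    augmentor = PhotoMetricDistortion(brightness_range=(64, 192),
                                      contrast_range=(0.8, 1.2),
                                      saturation_range=(0.8, 1.2),
                                      hue_delta=10)
    augmented = augmentor(frames)
    print(len(augmented), augmented[0].shape, augmented[0].dtype)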
| 38.55 | 113 | 0.625903 |
29fbf4df56da53e507777674f4e5f1014faa7ffe | 6,377 | py | Python | test/test_output_formatter.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
]
| 5 | 2020-08-05T21:02:35.000Z | 2021-11-11T14:31:35.000Z | test/test_output_formatter.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
]
| 1 | 2020-09-24T04:41:20.000Z | 2020-09-28T04:37:50.000Z | test/test_output_formatter.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
]
| 1 | 2021-08-09T19:23:24.000Z | 2021-08-09T19:23:24.000Z | import unittest
from todoster import output_formatter
class Testformat(unittest.TestCase):
bold = "\033[1m"
dim = "\033[2m"
ul = "\033[4m"
reset = "\033[0m"
color_default = "\033[39m"
color_red = "\033[31m"
def setUp(self):
pass
def tearDown(self):
pass
def test_format_headline(self):
expected_base = self.bold + self.ul
input_string = "merp"
self.assertEqual(output_formatter.format_headline(input_string), expected_base + self.color_default + input_string + self.reset)
self.assertEqual(output_formatter.format_headline(input_string, color="red"), expected_base + self.color_red + input_string + self.reset)
appendix = "something"
expected = self.ul + " (" + appendix + ")" + self.reset
self.assertEqual(output_formatter.format_headline("", appendix), expected_base + self.color_default + self.reset + expected)
def test_format_task(self): #pylint: disable=R0914
mock_task = {"id": 2,
"title": "finish front-page design",
"date": "2018-09-02",
"week": "2018W35",
"location": "Somewhere",
"project": {
"shortcode": "project",
"color": "blue"
},
"highPriority": True,
"isDone": False
}
prio = "\033[33m★" + self.reset
done = " " + "☐"
id_val = " " + self.dim + " 2." + self.reset
title = " finish front-page design" + self.reset
date = " " + self.bold + "@2018-09-02" + self.reset
week = " " + self.bold + "@W35" + self.reset
location = " " + "->Somewhere" + self.reset
project = " " + self.bold + "\033[34m" + "#project" + self.reset
self.assertEqual(output_formatter.format_task(mock_task), prio + done + id_val + title + date + location + project)
mock_task["isDone"] = True
done_done = " " + "\033[32m✓" + self.reset
done_title = " " + self.dim + "finish front-page design" + self.reset
done_date = " " + self.bold + self.dim + "@2018-09-02" + self.reset
done_location = " " + self.dim + "->Somewhere" + self.reset
done_project = " " + self.bold + self.dim + "\033[34m" + "#project" + self.reset
self.assertEqual(output_formatter.format_task(mock_task), prio + done_done + id_val + done_title + done_date + done_location + done_project)
mock_task["isDone"] = False
mock_task["date"] = ""
self.assertEqual(output_formatter.format_task(mock_task), prio + done + id_val + title + week + location + project)
mock_task["location"] = ""
self.assertEqual(output_formatter.format_task(mock_task), prio + done + id_val + title + week + project)
mock_task["project"] = ""
self.assertEqual(output_formatter.format_task(mock_task), prio + done + id_val + title + week)
mock_task["week"] = ""
self.assertEqual(output_formatter.format_task(mock_task), prio + done + id_val + title)
mock_task["highPriority"] = False
self.assertEqual(output_formatter.format_task(mock_task), " " + done + id_val + title)
def test_format_project(self):
mock_proj = {
"id": 22,
"title": "work",
"shortcode": "work",
"color": "red",
"active": True
}
expected = "\"work\" (" + self.color_red + "#work" + self.reset + ")"
self.assertEqual(output_formatter.format_project(mock_proj), expected)
def test_format_task_block(self):
mock_tasks = [{"id": 2,
"title": "finish front-page design",
"date": "2018-09-02",
"week": "2018W35",
"location": "Somewhere",
"project": {
"shortcode": "project",
"color": "blue"
},
"highPriority": True,
"isDone": False
},
{"id": 3,
"title": "finish front-page design",
"date": "2018-09-02",
"week": "2018W35",
"location": "Somewhere",
"project": {
"shortcode": "project",
"color": "blue"
},
"highPriority": True,
"isDone": False}]
context = "this"
expected_task_format = output_formatter.format_task(mock_tasks[0]) + "\n" + output_formatter.format_task(mock_tasks[1]) + "\n\n"
expected_empty_message = self.dim + " --- No tasks for this " + context + " ---" + self.reset + "\n\n"
self.assertEqual(output_formatter.format_task_block(context, []), expected_empty_message)
self.assertEqual(output_formatter.format_task_block(context, mock_tasks), expected_task_format)
mock_no_proj = mock_tasks
mock_no_proj[0]["project"] = ""
mock_no_proj[1]["project"] = ""
expected_task_format = output_formatter.format_task(mock_no_proj[0]) + "\n" + output_formatter.format_task(mock_no_proj[1]) + "\n\n"
self.assertEqual(output_formatter.format_task_block(context, mock_tasks, print_project=False), expected_task_format)
mock_no_date = mock_tasks
mock_no_date[0]["date"] = ""
mock_no_date[0]["week"] = ""
mock_no_date[1]["date"] = ""
mock_no_date[1]["week"] = ""
expected_task_format = output_formatter.format_task(mock_no_date[0]) + "\n" + output_formatter.format_task(mock_no_date[1]) + "\n\n"
self.assertEqual(output_formatter.format_task_block(context, mock_tasks, print_date=False), expected_task_format)
mock_no_loc = mock_tasks
mock_no_loc[0]["location"] = ""
mock_no_loc[1]["location"] = ""
expected_task_format = output_formatter.format_task(mock_no_loc[0]) + "\n" + output_formatter.format_task(mock_no_loc[1]) + "\n\n"
self.assertEqual(output_formatter.format_task_block(context, mock_tasks, print_location=False), expected_task_format)
if __name__ == '__main__':
unittest.main()
| 42.798658 | 148 | 0.559511 |
40ee4638590c00c60552c948504d2288f1bfbe07 | 23,171 | py | Python | qcodes/instrument_drivers/tektronix/AWG520.py | LGruenhaupt/Qcodes | ffb74dae53c13c4885e61b5a2df3f833d524de04 | [
"MIT"
]
| 1 | 2019-12-07T01:25:49.000Z | 2019-12-07T01:25:49.000Z | qcodes/instrument_drivers/tektronix/AWG520.py | Dominik-Vogel/Qcodes | b4cf7d58bc1bf3be97af6bf48f57cb6b87d588bb | [
"MIT"
]
| 12 | 2020-10-13T16:53:37.000Z | 2020-10-14T17:16:22.000Z | qcodes/instrument_drivers/tektronix/AWG520.py | Dominik-Vogel/Qcodes | b4cf7d58bc1bf3be97af6bf48f57cb6b87d588bb | [
"MIT"
]
| 1 | 2020-05-03T22:47:40.000Z | 2020-05-03T22:47:40.000Z | # Tektronix_AWG520.py class, to perform the communication between the Wrapper and the device
# Pieter de Groot <[email protected]>, 2008
# Martijn Schaafsma <[email protected]>, 2008
# Vishal Ranjan, 2012
# Ron schutjens, 2012
# Adriaan Rol, 2016 Ported to QCodes
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import time
import logging
import numpy as np
import struct
from qcodes import VisaInstrument, validators as vals
from qcodes.utils.deprecate import deprecate_moved_to_qcd
@deprecate_moved_to_qcd(alternative="qcodes_contrib_drivers.drivers.Tektronix.AWG520.Tektronix_AWG520")
class Tektronix_AWG520(VisaInstrument):
'''
This is the python driver for the Tektronix AWG520
Arbitrary Waveform Generator
.. todo::
1) Get All
2) Remove test_send??
3) Add docstrings
.. todo::
use inheritance for common use with 520, currently contains
a lot of repetition
'''
def __init__(self, name, address, reset=False, clock=1e9, numpoints=1000,
**kw):
'''
Initializes the AWG520.
Args:
name (str) : name of the instrument
address (str) : GPIB address (Note: 520 cannot be controlled
via ethernet)
reset (bool) : resets to default values, default=false
numpoints (int) : sets the number of datapoints
Output:
None
'''
super().__init__(name, address, **kw)
self._address = address
self._values = {}
self._values['files'] = {}
self._clock = clock
self._numpoints = numpoints
self._fname = ''
self.add_function('reset', call_cmd='*RST')
self.add_parameter('state',
get_cmd=self.get_state)
# Add parameters
self.add_parameter('trigger_mode',
get_cmd='AWGC:RMOD?',
set_cmd='AWGC:RMOD ' + '{}',
vals=vals.Enum('CONT', 'TRIG', 'ENH', 'GAT'))
self.add_parameter('trigger_impedance',
unit='Ohm',
label='Trigger impedance (Ohm)',
get_cmd='TRIG:IMP?',
set_cmd='TRIG:IMP '+'{}',
vals=vals.Enum(50, 1000),
get_parser=float)
self.add_parameter('trigger_level',
unit='V',
label='Trigger level (V)',
get_cmd='TRIG:LEV?',
set_cmd='TRIG:LEV '+'{:.3f}',
vals=vals.Numbers(-5, 5),
get_parser=float)
self.add_parameter('clock_freq',
label='Clock frequency (Hz)',
get_cmd='SOUR:FREQ?',
set_cmd='SOUR:FREQ '+'{}',
vals=vals.Numbers(1e6, 1e9),
get_parser=float)
        # Todo check if max freq is 1.2 GHz for the AWG 520 as well
self.add_parameter('numpoints',
label='Number of datapoints per wave',
get_cmd=self._do_get_numpoints,
set_cmd=self._do_set_numpoints,
vals=vals.Ints(100, int(1e9)))
for ch in [1, 2]:
amp_cmd = 'SOUR{}:VOLT:LEV:IMM:AMPL'.format(ch)
offset_cmd = 'SOUR{}:VOLT:LEV:IMM:OFFS'.format(ch)
self.add_parameter(
'ch{}_filename'.format(ch), set_cmd=self._gen_ch_set_func(
self._do_set_filename, ch), vals=vals.Anything())
self.add_parameter('ch{}_amp'.format(ch),
label='Amplitude channel {} (V)'.format(ch),
unit='V',
get_cmd=amp_cmd + '?',
set_cmd=amp_cmd + ' {:.6f}',
vals=vals.Numbers(0.02, 2.0),
get_parser=float)
self.add_parameter('ch{}_offset'.format(ch),
label='Offset channel {} (V)'.format(ch),
unit='V',
get_cmd=offset_cmd + '?',
set_cmd=offset_cmd + ' {:.3f}',
vals=vals.Numbers(-1.0, 1.0),
get_parser=float)
self.add_parameter('ch{}_status'.format(ch),
get_cmd='OUTP{}?'.format(ch),
set_cmd='OUTP{}'.format(ch) + ' {}',
vals=vals.Enum('ON', 'OFF'),
get_parser=float)
for j in [1, 2]:
# TODO: check that 520 does not have marker delay feature
# m_del_cmd = 'SOUR{}:MARK{}:DEL'.format(ch, j)
m_high_cmd = 'SOUR{}:MARK{}:VOLT:LEV:IMM:HIGH'.format(ch, j)
m_low_cmd = 'SOUR{}:MARK{}:VOLT:LEV:IMM:LOW'.format(ch, j)
self.add_parameter(
'ch{}_m{}_high'.format(ch, j),
label='Channel {} Marker {} high level (V)'.format(ch, j),
get_cmd=m_high_cmd + '?',
set_cmd=m_high_cmd + ' {:.3f}',
vals=vals.Numbers(-2., 2.),
get_parser=float)
self.add_parameter(
'ch{}_m{}_low'.format(ch, j),
label='Channel {} Marker {} low level (V)'.format(ch, j),
get_cmd=m_low_cmd + '?',
set_cmd=m_low_cmd + ' {:.3f}',
vals=vals.Numbers(-2., 2.),
get_parser=float)
# Add functions
if reset:
self.reset()
else:
self.get_all()
self.connect_message()
# Functions
def _gen_ch_set_func(self, fun, ch):
def set_func(val):
return fun(ch, val)
return set_func
def _gen_ch_get_func(self, fun, ch):
def get_func():
return fun(ch)
return get_func
# get state AWG
def get_state(self):
state = self.visa_handle.ask('AWGC:RSTATE?')
if state.startswith('0'):
return 'Idle'
elif state.startswith('1'):
return 'Waiting for trigger'
elif state.startswith('2'):
return 'Running'
else:
logging.error(__name__ + ' : AWG in undefined state')
return 'error'
def start(self):
self.visa_handle.write('AWGC:RUN')
return
def stop(self):
self.visa_handle.write('AWGC:STOP')
def get_folder_contents(self):
return self.visa_handle.ask('mmem:cat?')
def get_current_folder_name(self):
return self.visa_handle.ask('mmem:cdir?')
def set_current_folder_name(self, file_path):
self.visa_handle.write('mmem:cdir "%s"' % file_path)
def change_folder(self, dir):
self.visa_handle.write('mmem:cdir "%s"' % dir)
def goto_root(self):
self.visa_handle.write('mmem:cdir')
def make_directory(self, dir, root):
'''
makes a directory
if root = True, new dir in main folder
'''
if root:
self.goto_root()
self.visa_handle.write('MMEMory:MDIRectory "{}"'.format(dir))
else:
self.visa_handle.write('MMEMory:MDIRectory "{}"'.format(dir))
def get_all(self, update=True):
# TODO: fix bug in snapshot where it tries to get setable only param
# return self.snapshot(update=update)
return self.snapshot(update=False)
def clear_waveforms(self):
'''
Clears the waveform on both channels.
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Clear waveforms from channels')
self.visa_handle.write('SOUR1:FUNC:USER ""')
self.visa_handle.write('SOUR2:FUNC:USER ""')
def force_trigger(self):
'''
forces a trigger event (used for wait_trigger option in sequences)
Ron
'''
return self.visa_handle.write('TRIG:SEQ:IMM')
def force_logicjump(self):
'''
forces a jumplogic event (used as a conditional event during waveform
executions)
note: jump_logic events&mode have to be set properly!
Ron
'''
return self.visa_handle.write('AWGC:EVEN:SEQ:IMM')
def set_jumpmode(self, mode):
'''
sets the jump mode for jump logic events, possibilities:
LOGic,TABle,SOFTware
give mode as string
note: jump_logic events&mode have to be set properly!
Ron
'''
return self.visa_handle.write('AWGC:ENH:SEQ:JMOD %s' % mode)
def get_jumpmode(self, mode):
'''
get the jump mode for jump logic events
Ron
'''
return self.visa_handle.ask('AWGC:ENH:SEQ:JMOD?')
def _do_get_numpoints(self):
'''
Returns the number of datapoints in each wave
Input:
None
Output:
numpoints (int) : Number of datapoints in each wave
'''
return self._numpoints
def _do_set_numpoints(self, numpts):
'''
Sets the number of datapoints in each wave.
This acts on both channels.
Input:
numpts (int) : The number of datapoints in each wave
Output:
None
'''
logging.debug(__name__ + ' : Trying to set numpoints to %s' % numpts)
if numpts != self._numpoints:
logging.warning(__name__ + ' : changing numpoints. This will clear all waveforms!')
response = 'yes' # raw_input('type "yes" to continue')
if response == 'yes':
logging.debug(__name__ + ' : Setting numpoints to %s' % numpts)
self._numpoints = numpts
self.clear_waveforms()
else:
print('aborted')
def set_setup_filename(self, fname, force_reload=False):
if self._fname == fname and not force_reload:
print('File %s already loaded in AWG520' % fname)
return
else:
self._fname = fname
filename = "\%s/%s.seq" % (fname, fname)
self.set_sequence(filename=filename)
print('Waiting for AWG to load file "%s"' % fname)
sleeptime = 0.5
# while state idle is not possible due to timeout error while loading
t0 = time.time()
while(time.time()-t0 < 360):
try:
if self.get_state() == 'Idle':
break
except:
time.sleep(sleeptime)
print('.')
self.get_state()
print('Loading file took %.2fs' % (time.time()-t0))
return
def _do_set_filename(self, name, channel):
'''
Specifies which file has to be set on which channel
Make sure the file exists, and the numpoints and clock of the file
matches the instrument settings.
If file doesn't exist an error is raised, if the numpoints doesn't match
the command is neglected
Input:
name (str) : filename of uploaded file
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(__name__ + ' : Try to set {} on channel {}'.format(
name, channel))
exists = False
if name in self._values['files']:
exists = True
            logging.debug(__name__ + ' : File exists in local memory')
self._values['recent_channel_%s' % channel] = self._values[
'files'][name]
self._values['recent_channel_%s' % channel]['filename'] = name
else:
logging.debug(__name__ + ' : File does not exist in memory, \
reading from instrument')
lijst = self.visa_handle.ask('MMEM:CAT? "MAIN"')
bool = False
bestand = ""
for i in range(len(lijst)):
if (lijst[i] =='"'):
bool = True
elif (lijst[i] == ','):
bool = False
if (bestand == name):
exists = True
bestand = ""
elif bool:
bestand = bestand + lijst[i]
if exists:
data = self.visa_handle.ask('MMEM:DATA? "%s"' %name)
logging.debug(__name__ + ' : File exists on instrument, loading \
into local memory')
            # the string is built up as follows: '#' <lenlen1> <len> 'MAGIC 1000\r\n' '#' <len waveform> 'CLOCK ' <clockvalue>
len1 = int(data[1])
len2 = int(data[2:2+len1])
i = len1
tekst = ""
while (tekst !='#'):
tekst = data[i]
i = i+1
len3 = int(data[i])
len4 = int(data[i+1:i+1+len3])
w = []
m1 = []
m2 = []
for q in range(i+1+len3, i+1+len3+len4, 5):
j = int(q)
c, d = struct.unpack('<fB', data[j:5+j])
w.append(c)
m2.append(int(d/2))
m1.append(d-2*int(d/2))
clock = float(data[i+1+len3+len4+5:len(data)])
self._values['files'][name] = {}
self._values['files'][name]['w'] = w
self._values['files'][name]['m1'] = m1
self._values['files'][name]['m2'] = m2
self._values['files'][name]['clock'] = clock
self._values['files'][name]['numpoints'] = len(w)
self._values['recent_channel_%s' %channel] = self._values['files'][name]
self._values['recent_channel_%s' %channel]['filename'] = name
else:
logging.error(__name__ + ' : Invalid filename specified %s' %name)
if (self._numpoints==self._values['files'][name]['numpoints']):
logging.debug(__name__ + ' : Set file %s on channel %s' % (name, channel))
self.visa_handle.write('SOUR%s:FUNC:USER "%s","MAIN"' % (channel, name))
else:
self.visa_handle.write('SOUR%s:FUNC:USER "%s","MAIN"' % (channel, name))
            logging.warning(__name__ + ' : Incorrect length %s instead of %s'
%(self._values['files'][name]['numpoints'], self._numpoints))
# Ask for string with filenames
def get_filenames(self):
logging.debug(__name__ + ' : Read filenames from instrument')
return self.visa_handle.ask('MMEM:CAT? "MAIN"')
def return_self(self):
return self
# Send waveform to the device
def send_waveform(self, w, m1, m2, filename, clock):
'''
Sends a complete waveform. All parameters need to be specified.
choose a file extension 'wfm' (must end with .pat)
See also: resend_waveform()
Input:
w (float[numpoints]) : waveform
m1 (int[numpoints]) : marker1
m2 (int[numpoints]) : marker2
filename (str) : filename
clock (int) : frequency (Hz)
Output:
None
'''
logging.debug(__name__ + ' : Sending waveform %s to instrument' % filename)
# Check for errors
dim = len(w)
if (not((len(w) == len(m1)) and ((len(m1) == len(m2))))):
return 'error'
self._values['files'][filename] = {}
self._values['files'][filename]['w'] = w
self._values['files'][filename]['m1'] = m1
self._values['files'][filename]['m2'] = m2
self._values['files'][filename]['clock'] = clock
self._values['files'][filename]['numpoints'] = len(w)
m = m1 + np.multiply(m2, 2)
ws = ''
for i in range(0, len(w)):
ws = ws + struct.pack('<fB', w[i], int(m[i]))
s1 = 'MMEM:DATA "%s",' % filename
s3 = 'MAGIC 1000\n'
s5 = ws
s6 = 'CLOCK %.10e\n' % clock
s4 = '#' + str(len(str(len(s5)))) + str(len(s5))
lenlen = str(len(str(len(s6) + len(s5) + len(s4) + len(s3))))
s2 = '#' + lenlen + str(len(s6) + len(s5) + len(s4) + len(s3))
mes = s1 + s2 + s3 + s4 + s5 + s6
self.visa_handle.write(mes)
def send_pattern(self, w, m1, m2, filename, clock):
'''
Sends a pattern file.
similar to waveform except diff file extension
number of poitns different. diff byte conversion
See also: resend_waveform()
Input:
w (float[numpoints]) : waveform
m1 (int[numpoints]) : marker1
m2 (int[numpoints]) : marker2
filename (str) : filename
clock (int) : frequency (Hz)
Output:
None
'''
logging.debug(__name__ + ' : Sending pattern %s to instrument' % filename)
# Check for errors
dim = len(w)
if (not((len(w)==len(m1)) and ((len(m1)==len(m2))))):
return 'error'
self._values['files'][filename]={}
self._values['files'][filename]['w']=w
self._values['files'][filename]['m1']=m1
self._values['files'][filename]['m2']=m2
self._values['files'][filename]['clock']=clock
self._values['files'][filename]['numpoints']=len(w)
m = m1 + np.multiply(m2, 2)
ws = ''
for i in range(0, len(w)):
ws = ws + struct.pack('<fB', w[i], int(m[i]))
s1 = 'MMEM:DATA "%s",' % filename
s3 = 'MAGIC 2000\n'
s5 = ws
s6 = 'CLOCK %.10e\n' % clock
s4 = '#' + str(len(str(len(s5)))) + str(len(s5))
lenlen=str(len(str(len(s6) + len(s5) + len(s4) + len(s3))))
s2 = '#' + lenlen + str(len(s6) + len(s5) + len(s4) + len(s3))
mes = s1 + s2 + s3 + s4 + s5 + s6
self.visa_handle.write(mes)
def resend_waveform(self, channel, w=[], m1=[], m2=[], clock=[]):
'''
Resends the last sent waveform for the designated channel
        Overwrites only the parameters specified.
Input: (mandatory)
channel (int) : 1 or 2, the number of the designated channel
Input: (optional)
w (float[numpoints]) : waveform
m1 (int[numpoints]) : marker1
m2 (int[numpoints]) : marker2
clock (int) : frequency
Output:
None
'''
filename = self._values['recent_channel_%s' %channel]['filename']
logging.debug(__name__ + ' : Resending %s to channel %s' % (filename, channel))
if (w==[]):
w = self._values['recent_channel_%s' %channel]['w']
if (m1==[]):
m1 = self._values['recent_channel_%s' %channel]['m1']
if (m2==[]):
m2 = self._values['recent_channel_%s' %channel]['m2']
if (clock==[]):
clock = self._values['recent_channel_%s' %channel]['clock']
if not ( (len(w) == self._numpoints) and (len(m1) == self._numpoints) and (len(m2) == self._numpoints)):
logging.error(__name__ + ' : one (or more) lengths of waveforms do not match with numpoints')
self.send_waveform(w, m1, m2, filename, clock)
        self._do_set_filename(filename, channel)
def delete_all_waveforms_from_list(self):
'''
for compatibillity with awg, is not relevant for AWG520 since it
has no waveform list
'''
pass
def send_sequence(self, wfs, rep, wait, goto, logic_jump, filename):
'''
Sends a sequence file (for the moment only for ch1)
Args:
wfs: list of filenames
        Returns:
None
'''
logging.debug(__name__ + ' : Sending sequence %s to instrument' % filename)
N = str(len(rep))
try:
wfs.remove(N*[None])
except ValueError:
pass
s1 = 'MMEM:DATA "%s",' % filename
if len(np.shape(wfs)) ==1:
s3 = 'MAGIC 3001\n'
s5 = ''
for k in range(len(rep)):
s5 = s5+ '"%s",%s,%s,%s,%s\n'%(wfs[k],rep[k],wait[k],goto[k],logic_jump[k])
else:
s3 = 'MAGIC 3002\n'
s5 = ''
for k in range(len(rep)):
s5 = s5+ '"%s","%s",%s,%s,%s,%s\n'%(wfs[0][k],wfs[1][k],rep[k],wait[k],goto[k],logic_jump[k])
s4 = 'LINES %s\n'%N
lenlen=str(len(str(len(s5) + len(s4) + len(s3))))
s2 = '#' + lenlen + str(len(s5) + len(s4) + len(s3))
mes = s1 + s2 + s3 + s4 + s5
self.visa_handle.write(mes)
def send_sequence2(self,wfs1,wfs2,rep,wait,goto,logic_jump,filename):
'''
Sends a sequence file
Args:
wfs1: list of filenames for ch1 (all must end with .pat)
wfs2: list of filenames for ch2 (all must end with .pat)
rep: list
wait: list
goto: list
logic_jump: list
filename: name of output file (must end with .seq)
Returns:
None
'''
logging.debug(__name__ + ' : Sending sequence %s to instrument' % filename)
N = str(len(rep))
s1 = 'MMEM:DATA "%s",' % filename
s3 = 'MAGIC 3002\n'
s4 = 'LINES %s\n'%N
s5 = ''
for k in range(len(rep)):
s5 = s5+ '"%s","%s",%s,%s,%s,%s\n'%(wfs1[k],wfs2[k],rep[k],wait[k],goto[k],logic_jump[k])
lenlen=str(len(str(len(s5) + len(s4) + len(s3))))
s2 = '#' + lenlen + str(len(s5) + len(s4) + len(s3))
mes = s1 + s2 + s3 + s4 + s5
self.visa_handle.write(mes)
def set_sequence(self,filename):
'''
loads a sequence file on all channels.
Waveforms/patterns to be executed on respective channel
must be defined inside the sequence file itself
make sure to send all waveforms before setting a seq
'''
self.visa_handle.write('SOUR%s:FUNC:USER "%s","MAIN"' % (1, filename))
def load_and_set_sequence(self,wfs,rep,wait,goto,logic_jump,filename):
'''
        Loads and sets the AWG sequence.
'''
self.send_sequence(wfs,rep,wait,goto,logic_jump,filename)
self.set_sequence(filename)
| 34.635277 | 118 | 0.51914 |
adab49da32b23fb74b4093f623e84359a42f89df | 1,672 | py | Python | setup.py | guillaumevincent/rtfdoc | d640d4ec931775830598b8f1eb126c6389d7333a | [
"BSD-2-Clause"
]
| 1 | 2015-07-25T22:37:04.000Z | 2015-07-25T22:37:04.000Z | setup.py | guillaumevincent/rtfdoc | d640d4ec931775830598b8f1eb126c6389d7333a | [
"BSD-2-Clause"
]
| null | null | null | setup.py | guillaumevincent/rtfdoc | d640d4ec931775830598b8f1eb126c6389d7333a | [
"BSD-2-Clause"
]
| 1 | 2015-07-26T01:49:56.000Z | 2015-07-26T01:49:56.000Z | from os import path
from codecs import open
from setuptools import setup
from rtfdoc import __version__
base_dir = path.abspath(path.dirname(__file__))
with open(path.join(base_dir, 'LICENSE'), encoding='utf-8') as f:
LICENSE = f.read()
REQUIREMENTS = [
'CommonMark>=0.5.4',
'Jinja2>=2.7.3'
]
setup(
name='rtfdoc',
version=__version__,
description='create beautiful and intelligent markdown documentation',
long_description='''rtfdoc is a tool that makes it easy to create intelligent and beautiful documentation for software projects, consisting of multiple markdown sources.''',
url='http://rtfdoc.com',
author='Guillaume Vincent',
author_email='[email protected]',
license=LICENSE,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
'Topic :: Utilities',
],
keywords='markdown documentation',
packages=['rtfdoc'],
install_requires=REQUIREMENTS,
include_package_data=True,
entry_points={
'console_scripts': [
'rtfdoc-build = rtfdoc.build:main',
'rtfdoc-quickstart = rtfdoc.quickstart:main',
],
},
)
| 30.962963 | 177 | 0.642943 |
bc30e3dce803acae500ded31f26bf9ff253af2f6 | 2,490 | py | Python | wazimap_ng/datasets/admin/__init__.py | OpenUpSA/wazimap-ng | 57334e6da319482aa4ff2fde86c4ec27c30ee8d6 | [
"Apache-2.0"
]
| 11 | 2019-12-31T20:27:22.000Z | 2022-03-10T03:55:38.000Z | wazimap_ng/datasets/admin/__init__.py | OpenUpSA/wazimap-ng | 57334e6da319482aa4ff2fde86c4ec27c30ee8d6 | [
"Apache-2.0"
]
| 164 | 2020-02-06T15:02:22.000Z | 2022-03-30T22:42:00.000Z | wazimap_ng/datasets/admin/__init__.py | OpenUpSA/wazimap-ng | 57334e6da319482aa4ff2fde86c4ec27c30ee8d6 | [
"Apache-2.0"
]
| 16 | 2020-01-03T20:30:24.000Z | 2022-01-11T11:05:15.000Z | from django.contrib import admin
from django.contrib.postgres import fields
from django_json_widget.widgets import JSONEditorWidget
from treebeard.admin import TreeAdmin
from treebeard.forms import movenodeform_factory
from .indicator_data_admin import IndicatorDataAdmin
from .dataset_admin import DatasetAdmin
from .indicator_admin import IndicatorAdmin
from .dataset_file_admin import DatasetFileAdmin
from .group_admin import GroupAdmin
from .. import models
from ...boundaries.models import GeographyBoundary
from wazimap_ng.general.admin.admin_base import HistoryAdmin
from wazimap_ng.general.admin.forms import HistoryAdminForm
class GeographyBoundaryInline(admin.TabularInline):
model = GeographyBoundary
exclude = ("geom", "geom_cache", "area",)
readonly_fields = ("version",)
extra = 0
can_delete = False
def has_add_permission(self, request, obj):
return False
@admin.register(models.Geography)
class GeographyAdmin(TreeAdmin):
form = movenodeform_factory(models.Geography)
def hierarchy(obj):
return ", ".join(h.name for h in obj.get_root().geographyhierarchy_set.all())
list_display = (
"name", "code", "level", hierarchy, "created", "updated"
)
search_fields = ("name", "code")
list_filter = ("level", "geographyboundary__version")
inlines = (GeographyBoundaryInline,)
@admin.register(models.GeographyHierarchy)
class GeographyHierarchyAdmin(HistoryAdmin):
autocomplete_fields = ['root_geography']
formfield_overrides = {
fields.JSONField: {"widget": JSONEditorWidget},
}
list_display = (
"name", "created", "updated"
)
fieldsets = (
("", {
"fields": (
"name", "root_geography", "description",
)
}),
)
form = HistoryAdminForm
@admin.register(models.Universe)
class UniverseAdmin(HistoryAdmin):
formfield_overrides = {
fields.JSONField: {"widget": JSONEditorWidget},
}
list_display = (
"label", "created", "updated"
)
fieldsets = (
("", {
"fields": (
"label", "filters",
)
}),
)
form = HistoryAdminForm
@admin.register(models.Licence)
class LicenceAdmin(admin.ModelAdmin):
list_display = (
"name", "created", "updated"
)
@admin.register(models.Version)
class VersionAdmin(admin.ModelAdmin):
list_display = (
"name", "created", "updated"
)
| 25.151515 | 85 | 0.671888 |
2b8e6339cb8ce3b65f46ef9e44e9a99c36e73a30 | 476 | py | Python | libs/sqlobject/inheritance/tests/testDestroyCascade.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
]
| 422 | 2015-01-08T14:08:08.000Z | 2022-02-07T11:47:37.000Z | libs/sqlobject/inheritance/tests/testDestroyCascade.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
]
| 581 | 2015-01-01T08:07:16.000Z | 2022-02-23T11:44:37.000Z | libs/sqlobject/inheritance/tests/testDestroyCascade.py | scambra/HTPC-Manager | 1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d | [
"MIT"
]
| 162 | 2015-01-01T00:21:16.000Z | 2022-02-23T02:36:04.000Z | from sqlobject import *
from sqlobject.inheritance import *
from sqlobject.tests.dbtest import *
class TestCascade1(InheritableSQLObject):
dummy = IntCol()
class TestCascade2(TestCascade1):
c = ForeignKey('TestCascade3', cascade='null')
class TestCascade3(SQLObject):
dummy = IntCol()
def test_destroySelf():
setupClass([TestCascade1, TestCascade3, TestCascade2])
c = TestCascade3(dummy=1)
b = TestCascade2(cID=c.id, dummy=1)
c.destroySelf()
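    # Note (added for clarification): because the ForeignKey above was declared with
    # cascade='null', destroying the referenced TestCascade3 row should set b.c to
    # None rather than deleting the dependent TestCascade2 row.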
| 22.666667 | 58 | 0.731092 |
80f8cb0cffd024992aef3424b27b37120394de53 | 917 | py | Python | problems/test_0646_sort_by_start.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| 1 | 2017-06-17T23:47:17.000Z | 2017-06-17T23:47:17.000Z | problems/test_0646_sort_by_start.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| null | null | null | problems/test_0646_sort_by_start.py | chrisxue815/leetcode_python | dec3c160d411a5c19dc8e9d96e7843f0e4c36820 | [
"Unlicense"
]
| null | null | null | import unittest
from typing import List
import utils
# O(nlog(n)) time. O(1) space. Interval, sorting by start, greedy.
class Solution:
def findLongestChain(self, pairs: List[List[int]]) -> int:
if not pairs:
return 0
pairs.sort()
num_non_overlaps = 0
prev_end = pairs[0][0] - 1
for start, end in pairs:
if prev_end < start:
num_non_overlaps += 1
prev_end = end
else:
prev_end = min(prev_end, end)
return num_non_overlaps
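# Illustration (added; not taken from the JSON test data used below): with
# pairs [[1, 2], [2, 3], [3, 4]], sorting by start keeps [1, 2], merges the
# overlapping [2, 3] into it, then accepts [3, 4], so the longest chain is 2.
assert Solution().findLongestChain([[1, 2], [2, 3], [3, 4]]) == 2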
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().findLongestChain(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
| 23.512821 | 70 | 0.582334 |
cf9f62e501a0468f9a185d4f3c1951cc025a482f | 817 | py | Python | full-problems/facingSun.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
]
| null | null | null | full-problems/facingSun.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
]
| null | null | null | full-problems/facingSun.py | vikas-t/DS-Algo | ea654d1cad5374c824c52da9d3815a9546eb43fa | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/facing-the-sun/0
def sol(arr, n):
"""
    Create an auxiliary array that stores the index of the building which is
    tallest up to that index. A building receives sunlight only if the tallest
    building at its own index is the building itself.
"""
res = 0
lmax = [None for _ in range(n)]
lmax[0] = 0
# The first building receives the sunlight always
for i in range(1, n):
if arr[i] > arr[lmax[i-1]]:
lmax[i] = i
else:
lmax[i] = lmax[i-1]
# If the current building is taller than the previous lmax store the
# current index otherwise store the previous lmax
#print(lmax)
for i in range(n):
if i == lmax[i]:
res+=1
return res
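# --- Example run (added for illustration; not part of the original file) ---
# Buildings face the sun from the left, so a building sees the sun only if it is
# strictly taller than every building before it.
if __name__ == '__main__':
    heights = [7, 4, 8, 2, 9]
    print(sol(heights, len(heights)))  # expected output: 3 (buildings 7, 8 and 9)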
| 29.178571 | 78 | 0.602203 |
4b8f15b645cd5400918f03d8dab01be7ddce106b | 498 | py | Python | histPlot.py | rhiggins2308/G00364712-project2018 | a12817bfba14863595556b1771143993dbf13f37 | [
"Apache-2.0"
]
| null | null | null | histPlot.py | rhiggins2308/G00364712-project2018 | a12817bfba14863595556b1771143993dbf13f37 | [
"Apache-2.0"
]
| null | null | null | histPlot.py | rhiggins2308/G00364712-project2018 | a12817bfba14863595556b1771143993dbf13f37 | [
"Apache-2.0"
]
| null | null | null | # Robert Higgins (G00364712) - Final Submission 2018-04-29
# 52167-Programming & Scripting
# Project 2018
# generate Histograms for Fisher's Iris Dataset
def getData():
return numpy.genfromtxt('data/iris.csv', delimiter=',')
import numpy
import matplotlib.pyplot as pl
iris = getData()
# for various histogram plots, change the y-value in the iris array
# values 0 <= y <= 3 are acceptable
# 0 = petal length
# 1 = petal width
# 2 = sepal length
# 3 = sepal width
pl.hist(iris[:,3])
pl.show() | 24.9 | 67 | 0.716867 |
56effed4f983564a3b9dbe9226d10d8074e6cbbc | 7,490 | py | Python | allennlp/modules/seq2seq_encoders/stacked_self_attention.py | hellozhaojian/allennlp | c22809f27a0764f6948a5c21c4d35d59845c39ac | [
"Apache-2.0"
]
| null | null | null | allennlp/modules/seq2seq_encoders/stacked_self_attention.py | hellozhaojian/allennlp | c22809f27a0764f6948a5c21c4d35d59845c39ac | [
"Apache-2.0"
]
| null | null | null | allennlp/modules/seq2seq_encoders/stacked_self_attention.py | hellozhaojian/allennlp | c22809f27a0764f6948a5c21c4d35d59845c39ac | [
"Apache-2.0"
]
| null | null | null | from typing import List
from overrides import overrides
import torch
from torch.nn import Dropout
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.seq2seq_encoders.multi_head_self_attention import MultiHeadSelfAttention
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.nn.activations import Activation
from allennlp.nn.util import add_positional_features
@Seq2SeqEncoder.register("stacked_self_attention")
class StackedSelfAttentionEncoder(Seq2SeqEncoder):
"""
Implements a stacked self-attention encoder similar to the Transformer
architecture in `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
This encoder combines 3 layers in a 'block':
1. A 2 layer FeedForward network.
2. Multi-headed self attention, which uses 2 learnt linear projections
to perform a dot-product similarity between every pair of elements
scaled by the square root of the sequence length.
3. Layer Normalisation.
These are then stacked into ``num_layers`` layers.
Parameters
----------
input_dim : ``int``, required.
The input dimension of the encoder.
hidden_dim : ``int``, required.
The hidden dimension used for the _input_ to self attention layers
and the _output_ from the feedforward layers.
projection_dim : ``int``, required.
The dimension of the linear projections for the self-attention layers.
feedforward_hidden_dim : ``int``, required.
The middle dimension of the FeedForward network. The input and output
dimensions are fixed to ensure sizes match up for the self attention layers.
num_layers : ``int``, required.
        The number of stacked self attention -> feedforward -> layer normalisation blocks.
num_attention_heads : ``int``, required.
The number of attention heads to use per layer.
use_positional_encoding : ``bool``, optional, (default = True)
Whether to add sinusoidal frequencies to the input tensor. This is strongly recommended,
as without this feature, the self attention layers have no idea of absolute or relative
position (as they are just computing pairwise similarity between vectors of elements),
which can be important features for many tasks.
dropout_prob : ``float``, optional, (default = 0.1)
The dropout probability for the feedforward network.
residual_dropout_prob : ``float``, optional, (default = 0.2)
The dropout probability for the residual connections.
attention_dropout_prob : ``float``, optional, (default = 0.1)
The dropout probability for the attention distributions in each attention layer.
""" # noqa
def __init__(
self,
input_dim: int,
hidden_dim: int,
projection_dim: int,
feedforward_hidden_dim: int,
num_layers: int,
num_attention_heads: int,
use_positional_encoding: bool = True,
dropout_prob: float = 0.1,
residual_dropout_prob: float = 0.2,
attention_dropout_prob: float = 0.1,
) -> None:
super().__init__()
self._use_positional_encoding = use_positional_encoding
self._attention_layers: List[MultiHeadSelfAttention] = []
self._feedfoward_layers: List[FeedForward] = []
self._layer_norm_layers: List[LayerNorm] = []
self._feed_forward_layer_norm_layers: List[LayerNorm] = []
feedfoward_input_dim = input_dim
for i in range(num_layers):
feedfoward = FeedForward(
feedfoward_input_dim,
activations=[Activation.by_name("relu")(), Activation.by_name("linear")()],
hidden_dims=[feedforward_hidden_dim, hidden_dim],
num_layers=2,
dropout=dropout_prob,
)
# Note: Please use `ModuleList` in new code. It provides better
# support for running on multiple GPUs. We've kept `add_module` here
# solely for backwards compatibility with existing serialized models.
self.add_module(f"feedforward_{i}", feedfoward)
self._feedfoward_layers.append(feedfoward)
feedforward_layer_norm = LayerNorm(feedfoward.get_output_dim())
self.add_module(f"feedforward_layer_norm_{i}", feedforward_layer_norm)
self._feed_forward_layer_norm_layers.append(feedforward_layer_norm)
self_attention = MultiHeadSelfAttention(
num_heads=num_attention_heads,
input_dim=hidden_dim,
attention_dim=projection_dim,
values_dim=projection_dim,
attention_dropout_prob=attention_dropout_prob,
)
self.add_module(f"self_attention_{i}", self_attention)
self._attention_layers.append(self_attention)
layer_norm = LayerNorm(self_attention.get_output_dim())
self.add_module(f"layer_norm_{i}", layer_norm)
self._layer_norm_layers.append(layer_norm)
feedfoward_input_dim = hidden_dim
self.dropout = Dropout(residual_dropout_prob)
self._input_dim = input_dim
self._output_dim = self._attention_layers[-1].get_output_dim()
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._output_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, inputs: torch.Tensor, mask: torch.Tensor):
if self._use_positional_encoding:
output = add_positional_features(inputs)
else:
output = inputs
for i in range(len(self._attention_layers)):
# It's necessary to use `getattr` here because the elements stored
# in the lists are not replicated by torch.nn.parallel.replicate
# when running on multiple GPUs. Please use `ModuleList` in new
# code. It handles this issue transparently. We've kept `add_module`
# (in conjunction with `getattr`) solely for backwards compatibility
# with existing serialized models.
attention = getattr(self, f"self_attention_{i}")
feedforward = getattr(self, f"feedforward_{i}")
feedforward_layer_norm = getattr(self, f"feedforward_layer_norm_{i}")
layer_norm = getattr(self, f"layer_norm_{i}")
cached_input = output
# Project output of attention encoder through a feedforward
# network and back to the input size for the next layer.
# shape (batch_size, timesteps, input_size)
feedforward_output = feedforward(output)
feedforward_output = self.dropout(feedforward_output)
if feedforward_output.size() == cached_input.size():
# First layer might have the wrong size for highway
# layers, so we exclude it here.
feedforward_output = feedforward_layer_norm(feedforward_output + cached_input)
# shape (batch_size, sequence_length, hidden_dim)
attention_output = attention(feedforward_output, mask)
output = layer_norm(self.dropout(attention_output) + feedforward_output)
return output
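# --- Usage sketch (added for illustration; not part of the original AllenNLP module) ---
# A minimal forward pass on random inputs, assuming AllenNLP and PyTorch are
# installed; the dimensions below are arbitrary illustrative choices.
if __name__ == "__main__":
    encoder = StackedSelfAttentionEncoder(
        input_dim=16,
        hidden_dim=16,
        projection_dim=16,
        feedforward_hidden_dim=32,
        num_layers=2,
        num_attention_heads=4,
    )
    inputs = torch.randn(2, 5, 16)
    mask = torch.ones(2, 5, dtype=torch.bool)
    print(encoder(inputs, mask).shape)  # expected: torch.Size([2, 5, 16])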
| 44.850299 | 130 | 0.680374 |
c57e94826af412f4dd06be37fe871a91ceed4ede | 3,827 | py | Python | airbyte-integrations/connectors/source-chargebee/unit_tests/test_source.py | Danucas/airbyte | 9e77879a7a3b1a5a559a3df9fa85056365b6fbef | [
"MIT"
]
| 1 | 2022-03-29T01:08:58.000Z | 2022-03-29T01:08:58.000Z | airbyte-integrations/connectors/source-chargebee/unit_tests/test_source.py | Danucas/airbyte | 9e77879a7a3b1a5a559a3df9fa85056365b6fbef | [
"MIT"
]
| 5 | 2022-02-22T14:49:48.000Z | 2022-03-19T10:43:08.000Z | airbyte-integrations/connectors/source-chargebee/unit_tests/test_source.py | Danucas/airbyte | 9e77879a7a3b1a5a559a3df9fa85056365b6fbef | [
"MIT"
]
| 1 | 2022-03-11T06:21:24.000Z | 2022-03-11T06:21:24.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock
import responses
from airbyte_cdk.models import AirbyteConnectionStatus, AirbyteMessage, ConnectorSpecification, Status, Type
from jsonschema import Draft7Validator
from source_chargebee import SourceChargebee
@responses.activate
def test_discover_v1(test_config_v1):
source = SourceChargebee()
logger_mock = MagicMock()
catalog = source.discover(logger_mock, test_config_v1)
catalog = AirbyteMessage(type=Type.CATALOG, catalog=catalog).dict(exclude_unset=True)
schemas = [stream["json_schema"] for stream in catalog["catalog"]["streams"]]
for schema in schemas:
Draft7Validator.check_schema(schema)
@responses.activate
def test_discover_v2(test_config_v2):
source = SourceChargebee()
logger_mock = MagicMock()
catalog = source.discover(logger_mock, test_config_v2)
catalog = AirbyteMessage(type=Type.CATALOG, catalog=catalog).dict(exclude_unset=True)
schemas = [stream["json_schema"] for stream in catalog["catalog"]["streams"]]
for schema in schemas:
Draft7Validator.check_schema(schema)
def test_spec():
source = SourceChargebee()
logger_mock = MagicMock()
spec = source.spec(logger_mock)
assert isinstance(spec, ConnectorSpecification)
@responses.activate
def test_check_v1(test_config_v1):
responses.add(
responses.GET,
"https://airbyte-test.chargebee.com/api/v2/subscriptions",
json={"list": [{"subscription": {"id": "cbdemo_cancelled_sub"}, "customer": {}, "card": {}}]},
)
source = SourceChargebee()
logger_mock = MagicMock()
assert source.check(logger_mock, test_config_v1) == AirbyteConnectionStatus(status=Status.SUCCEEDED)
assert len(responses.calls) == 1
@responses.activate
def test_check_v2(test_config_v2):
responses.add(
responses.GET,
"https://airbyte-test.chargebee.com/api/v2/subscriptions",
json={"list": [{"subscription": {"id": "cbdemo_cancelled_sub"}, "customer": {}, "card": {}}]},
)
source = SourceChargebee()
logger_mock = MagicMock()
assert source.check(logger_mock, test_config_v2) == AirbyteConnectionStatus(status=Status.SUCCEEDED)
assert len(responses.calls) == 1
@responses.activate
def test_check_error_v1(test_config_v1):
source = SourceChargebee()
logger_mock = MagicMock()
assert source.check(logger_mock, test_config_v1).status == Status.FAILED
assert len(responses.calls) == 1
@responses.activate
def test_check_error_v2(test_config_v2):
source = SourceChargebee()
logger_mock = MagicMock()
assert source.check(logger_mock, test_config_v2).status == Status.FAILED
assert len(responses.calls) == 1
@responses.activate
def test_source_streams_v1(test_config_v1):
source = SourceChargebee()
streams = source.streams(test_config_v1)
assert len(streams) == 10
actual_stream_names = {stream.name for stream in streams}
expected_stream_names = {
"coupon",
"credit_note",
"customer",
"event",
"invoice",
"order",
"subscription",
"addon",
"plan",
"transaction",
}
assert expected_stream_names == actual_stream_names
@responses.activate
def test_source_streams_v2(test_config_v2):
source = SourceChargebee()
streams = source.streams(test_config_v2)
assert len(streams) == 11
actual_stream_names = {stream.name for stream in streams}
expected_stream_names = {
"coupon",
"credit_note",
"customer",
"event",
"invoice",
"order",
"subscription",
"item",
"item_price",
"attached_item",
"transaction",
}
assert expected_stream_names == actual_stream_names
| 30.616 | 108 | 0.695323 |
430c7f47633934cf6e802741731a390ff26b0882 | 893 | py | Python | examples/sklearn/grid_search.py | kintatta/d3rl | 0674c4898927a53f36c5c875d8f217337f22d364 | [
"MIT"
]
| null | null | null | examples/sklearn/grid_search.py | kintatta/d3rl | 0674c4898927a53f36c5c875d8f217337f22d364 | [
"MIT"
]
| null | null | null | examples/sklearn/grid_search.py | kintatta/d3rl | 0674c4898927a53f36c5c875d8f217337f22d364 | [
"MIT"
]
| null | null | null | from d3rlpy.algos import DQN
from d3rlpy.datasets import get_cartpole
from d3rlpy.metrics.scorer import evaluate_on_environment
from d3rlpy.context import parallel
from sklearn.model_selection import GridSearchCV
# obtain dataset
dataset, env = get_cartpole()
# setup algorithm with GPU enabled
dqn = DQN(n_epochs=1, use_gpu=True)
# grid search with multiple GPUs assigned to individual processes
with parallel():
env_score = evaluate_on_environment(env)
gscv = GridSearchCV(estimator=dqn,
param_grid={
'learning_rate': [1e-3, 3e-4, 1e-4],
'gamma': [0.99, 0.95, 0.9]
},
scoring={'environment': env_score},
refit=False,
n_jobs=3)
gscv.fit(dataset.episodes, show_progress=False)
    print(gscv.cv_results_)
| 33.074074 | 64 | 0.621501 |
1a4b5921b730831e2a919d37d23bc559fba281f2 | 14,188 | py | Python | spinup/algos/pytorch/ddpg/ddpg.py | MLRG-CEFET-RJ/DRL-ALM | 19e08ee71660c0ce294b0cea8d36e8ac637ddbcf | [
"MIT"
]
| 3 | 2020-03-05T09:49:00.000Z | 2021-05-18T22:03:24.000Z | spinup/algos/pytorch/ddpg/ddpg.py | MLRG-CEFET-RJ/DRL-ALM | 19e08ee71660c0ce294b0cea8d36e8ac637ddbcf | [
"MIT"
]
| null | null | null | spinup/algos/pytorch/ddpg/ddpg.py | MLRG-CEFET-RJ/DRL-ALM | 19e08ee71660c0ce294b0cea8d36e8ac637ddbcf | [
"MIT"
]
| null | null | null | from copy import deepcopy
import numpy as np
import pandas as pd
import torch
from torch.optim import Adam
import gym
import time
import spinup.algos.pytorch.ddpg.core as core
from spinup.utils.logx import EpochLogger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for DDPG agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(obs=self.obs_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs])
return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()}
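# --- Illustrative usage (added; not part of the original Spinning Up code) ---
# A minimal sketch of the store/sample cycle, assuming 3-dimensional observations
# and 2-dimensional actions; this helper is not called anywhere in the module.
def _replay_buffer_demo():
    buf = ReplayBuffer(obs_dim=3, act_dim=2, size=100)
    for _ in range(10):
        buf.store(obs=np.random.randn(3), act=np.random.randn(2), rew=0.0,
                  next_obs=np.random.randn(3), done=False)
    batch = buf.sample_batch(batch_size=4)
    return {k: v.shape for k, v in batch.items()}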
"""
The following parameters have been changed:
env_fn: default value set to core.ALMEnv
epochs: default value changed from 100 to 300
act_noise: default value changed from .1 to .01
time_horizon: added parameter, with default value 80
discount_rate: added parameter, with default value .06
"""
def ddpg(env_fn = core.ALMEnv, actor_critic = core.MLPActorCritic,
ac_kwargs = dict(), seed = 0, steps_per_epoch = 4000, epochs = 300,
replay_size = int(1e6), gamma = 0.99, polyak=0.995, pi_lr = 1e-3,
q_lr = 1e-3, batch_size = 100, start_steps = 10000, update_after = 1000,
update_every = 50, act_noise = .01, num_test_episodes = 10,
max_ep_len = 1000, logger_kwargs = dict(), save_freq = 1,
time_horizon = 80, discount_rate = .06):
"""
Deep Deterministic Policy Gradient (DDPG)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
In this version, the default environment is 'ALMEnv'
actor_critic: The constructor method for a PyTorch Module with an ``act``
method, a ``pi`` module, and a ``q`` module. The ``act`` method and
``pi`` module should accept batches of observations as inputs,
and ``q`` should accept a batch of observations and a batch of
actions as inputs. When called, these should return:
=========== ================ ======================================
Call Output Shape Description
=========== ================ ======================================
``act`` (batch, act_dim) | Numpy array of actions for each
| observation.
``pi`` (batch, act_dim) | Tensor containing actions from policy
| given observations.
``q`` (batch,) | Tensor containing the current estimate
| of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to DDPG.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
torch.manual_seed(seed)
np.random.seed(seed)
# env, test_env = env_fn(), env_fn() original OpenAI SpinningUp entry
env = env_fn(T = time_horizon, rate = discount_rate) # Added by the author
test_env = env_fn(T = time_horizon, rate = discount_rate) # Added by the author
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Create actor-critic module and target networks
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
ac_targ = deepcopy(ac)
# Freeze target networks with respect to optimizers (only update via polyak averaging)
for p in ac_targ.parameters():
p.requires_grad = False
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables (protip: try to get a feel for how different size networks behave!)
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q])
logger.log('\nNumber of parameters: \t pi: %d, \t q: %d\n'%var_counts)
# Set up function for computing DDPG Q-loss
def compute_loss_q(data):
o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
q = ac.q(o,a)
# Bellman backup for Q function
with torch.no_grad():
q_pi_targ = ac_targ.q(o2, ac_targ.pi(o2))
backup = r + gamma * (1 - d) * q_pi_targ
# MSE loss against Bellman backup
loss_q = ((q - backup)**2).mean()
# Useful info for logging
loss_info = dict(QVals=q.detach().numpy())
return loss_q, loss_info
# Set up function for computing DDPG pi loss
def compute_loss_pi(data):
o = data['obs']
q_pi = ac.q(o, ac.pi(o))
return -q_pi.mean()
# Set up optimizers for policy and q-function
pi_optimizer = Adam(ac.pi.parameters(), lr=pi_lr)
q_optimizer = Adam(ac.q.parameters(), lr=q_lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(data):
# First run one gradient descent step for Q.
q_optimizer.zero_grad()
loss_q, loss_info = compute_loss_q(data)
loss_q.backward()
q_optimizer.step()
# Freeze Q-network so you don't waste computational effort
# computing gradients for it during the policy learning step.
for p in ac.q.parameters():
p.requires_grad = False
# Next run one gradient descent step for pi.
pi_optimizer.zero_grad()
loss_pi = compute_loss_pi(data)
loss_pi.backward()
pi_optimizer.step()
# Unfreeze Q-network so you can optimize it at next DDPG step.
for p in ac.q.parameters():
p.requires_grad = True
# Record things
logger.store(LossQ=loss_q.item(), LossPi=loss_pi.item(), **loss_info)
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(polyak)
p_targ.data.add_((1 - polyak) * p.data)
def get_action(o, noise_scale):
a = ac.act(torch.as_tensor(o, dtype=torch.float32))
a = a * (noise_scale * np.random.randn(act_dim) + 1) # Added by the author
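        # Note (added): the exploration noise is applied multiplicatively here, and the
        # action is renormalised below so its components sum to 1, consistent with
        # treating the action as a vector of allocation weights for the ALM environment
        # (an inference from the surrounding code, not stated explicitly by the author).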
return (a / np.sum(a)) # Added by the author
# a += noise_scale * np.random.randn(act_dim) Original OpenAI SpinningUp entry
# return np.clip(a, -act_limit, act_limit) Original OpenAI SpinningUp entry
def test_agent():
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time (noise_scale=0)
o, r, d, _ = test_env.step(get_action(o, 0))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
# Prepare for interaction with environment
total_steps = steps_per_epoch * epochs
start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy (with some noise, via act_noise).
"""
if t > start_steps:
a = get_action(o, act_noise)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Update handling
if t >= update_after and t % update_every == 0:
for _ in range(update_every):
batch = replay_buffer.sample_batch(batch_size)
update(data=batch)
# End of epoch handling
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('QVals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# parser.add_argument('--env', type=str, default='HalfCheetah-v2') Original OpenAI SpinningUp entry
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--time_horizon', type = int, default = 80) # Added by the author
parser.add_argument('--discount_rate', type = float, default = 0.06) # Added by the author
parser.add_argument('--exp_name', type=str, default='ddpg')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ddpg(env_fn = core.ALMEnv, actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
gamma=args.gamma, seed=args.seed, epochs=args.epochs,
logger_kwargs=logger_kwargs, time_horizon = args.time_horizon,
discount_rate = args.discount_rate)
| 40.887608 | 103 | 0.612983 |
f3c2fbcd5ce1eee3395175c89c6e1eebaf2f0bf9 | 5,902 | py | Python | se_leg_ra/views/ra.py | SUNET/se-leg-ra | ac30e700dda4fceb7a9205b4b2790478cf3ba5b4 | [
"BSD-3-Clause"
]
| null | null | null | se_leg_ra/views/ra.py | SUNET/se-leg-ra | ac30e700dda4fceb7a9205b4b2790478cf3ba5b4 | [
"BSD-3-Clause"
]
| null | null | null | se_leg_ra/views/ra.py | SUNET/se-leg-ra | ac30e700dda4fceb7a9205b4b2790478cf3ba5b4 | [
"BSD-3-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
from flask import Blueprint, current_app, render_template, url_for, request, redirect
from se_leg_ra.forms import DriversLicenseForm, IdCardForm, PassportForm, NationalIDCardForm
from se_leg_ra.decorators import require_eppn
from se_leg_ra.db import IdCardProofing, DriversLicenseProofing, PassportProofing, NationalIdCardProofing
from se_leg_ra.utils import log_and_send_proofing
__author__ = 'lundberg'
se_leg_ra_views = Blueprint('se_leg_ra', __name__, url_prefix='', template_folder='templates')
def get_view_context(form, user):
view_context = {
'form': form,
'action_url': request.path,
'user': user,
'success_message': None,
'error_message': None
}
return view_context
@se_leg_ra_views.route('/', methods=['GET'])
@require_eppn
def index(user):
current_app.logger.debug('GET index')
# Set up the default form
view_context = get_view_context(DriversLicenseForm(), user)
view_context['action_url'] = url_for('se_leg_ra.drivers_license')
return render_template('drivers_license.jinja2', view_context=view_context)
@se_leg_ra_views.route('/login', methods=['GET'])
def login():
current_app.logger.debug('GET login')
login_dict = current_app.config['LOGIN_ALTERNATIVES']
return render_template('login.jinja2', login_alternatives=login_dict)
@se_leg_ra_views.route('/id-card', methods=['GET', 'POST'])
@require_eppn
def id_card(user):
form = IdCardForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
current_app.logger.debug('id_card form validated')
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'card_number': form.card_number.data,
'expiry_date': form.expiry_date.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = IdCardProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['card_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('id_card.jinja2', view_context=view_context)
@se_leg_ra_views.route('/drivers-license', methods=['GET', 'POST'])
@require_eppn
def drivers_license(user):
form = DriversLicenseForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'reference_number': form.reference_number.data,
'expiry_date': form.expiry_date.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = DriversLicenseProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['reference_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('drivers_license.jinja2', view_context=view_context)
@se_leg_ra_views.route('/passport', methods=['GET', 'POST'])
@require_eppn
def passport(user):
form = PassportForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'expiry_date': form.expiry_date.data,
'passport_number': form.passport_number.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = PassportProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['passport_number'], data['qr_code'], data['ocular_validation'],
data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('passport.jinja2', view_context=view_context)
@se_leg_ra_views.route('/national-id-card', methods=['GET', 'POST'])
@require_eppn
def national_id_card(user):
form = NationalIDCardForm()
view_context = get_view_context(form, user)
if form.validate_on_submit():
data = {
'qr_code': form.qr_code.data,
'nin': form.nin.data,
'expiry_date': form.expiry_date.data,
'card_number': form.card_number.data,
'ocular_validation': form.ocular_validation.data
}
current_app.logger.debug('Form data: {}'.format(data))
# Log the vetting attempt
proofing_element = NationalIdCardProofing(current_app.config['RA_APP_ID'], user['eppn'], data['nin'],
data['card_number'], data['qr_code'],
data['ocular_validation'], data['expiry_date'], '2018v1')
view_context = log_and_send_proofing(proofing_element, identity=data['nin'], view_context=view_context)
return render_template('national_id_card.jinja2', view_context=view_context)
@se_leg_ra_views.route('/logout', methods=['GET'])
@require_eppn
def logout(user):
current_app.logger.info('User {} logged out'.format(user['eppn']))
return redirect(current_app.config['LOGOUT_URL'])
| 40.986111 | 119 | 0.64978 |
4b640742320c6611df738a7e0061c4f6a09db0ad | 1,058 | py | Python | model_zoo/__init__.py | misads/torch_image_template | db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1 | [
"MIT"
]
| 5 | 2019-12-23T05:13:15.000Z | 2020-04-09T03:47:53.000Z | torch_template/model_zoo/__init__.py | misads/torch_template | 959be6d4bc368c1c3310c3902cc04271cca0941f | [
"MIT"
]
| null | null | null | torch_template/model_zoo/__init__.py | misads/torch_template | 959be6d4bc368c1c3310c3902cc04271cca0941f | [
"MIT"
]
| null | null | null | from .FFA import FFA
from .linknet import LinkNet
from .linknet import LinkNet50
from .unet import NestedUNet, UNet
from .pix2pix import GlobalGenerator, LocalEnhancer
from .transform_net import TransformNet
from .dense import Dense
model_zoo = {
'FFA': FFA,
'LinkNet': LinkNet,
'LinkNet50': LinkNet50,
'NestedUNet': NestedUNet,
'UNet': UNet,
'GlobalGenerator': GlobalGenerator,
'LocalEnhancer': LocalEnhancer,
'TransformNet': TransformNet,
'Dense': Dense,
}
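# Hypothetical usage sketch of the registry above (the constructor call is an
# assumption, not the actual signature of these classes):
#     model_cls = model_zoo['UNet']   # look the class up by name
#     model = model_cls()             # instantiate with that class's own defaults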
"""
Model: nf n_params 256 512 256_batch8
UNet 8 852,304 - 569M
NestedUNet 64 36,629,763 1851M 5365M 10037M
FFA - 4,455,913 5509M out of memory out of memory
LinkNet50 - 28,762,115 1051M 1357M
LinkNet - 1,533,635 761M 1883M
Global 64 45,614,595 1415M 1767M 2457M
TransformNet 32 1,673,097 829M 1587M 2615M
Dense ? 11,581,967 907M 1659M 2695M
""" | 33.0625 | 69 | 0.586957 |
496710b10edee25a22becc8d259831ecfb232e8c | 1,634 | py | Python | byceps/services/ticketing/log_service.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
]
| null | null | null | byceps/services/ticketing/log_service.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
]
| null | null | null | byceps/services/ticketing/log_service.py | GyBraLAN/byceps | b53087849c10a531b66d08999116fa1bef312a7f | [
"BSD-3-Clause"
]
| null | null | null | """
byceps.services.ticketing.log_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from sqlalchemy import select
from ...database import db
from .dbmodels.log import TicketLogEntry as DbTicketLogEntry
from .transfer.log import TicketLogEntry, TicketLogEntryData
from .transfer.models import TicketID
def create_entry(
event_type: str, ticket_id: TicketID, data: TicketLogEntryData
) -> None:
"""Create a ticket log entry."""
entry = build_log_entry(event_type, ticket_id, data)
db.session.add(entry)
db.session.commit()
def build_log_entry(
event_type: str, ticket_id: TicketID, data: TicketLogEntryData
) -> DbTicketLogEntry:
"""Assemble, but not persist, a ticket log entry."""
now = datetime.utcnow()
return DbTicketLogEntry(now, event_type, ticket_id, data)
def get_entries_for_ticket(ticket_id: TicketID) -> list[TicketLogEntry]:
"""Return the log entries for that ticket."""
db_entries = db.session.execute(
select(DbTicketLogEntry)
.filter_by(ticket_id=ticket_id)
.order_by(DbTicketLogEntry.occurred_at)
).scalars().all()
return [_db_entity_to_entry(db_entry) for db_entry in db_entries]
def _db_entity_to_entry(db_entry: DbTicketLogEntry) -> TicketLogEntry:
return TicketLogEntry(
id=db_entry.id,
occurred_at=db_entry.occurred_at,
event_type=db_entry.event_type,
ticket_id=db_entry.ticket_id,
data=db_entry.data.copy(),
)
| 27.694915 | 72 | 0.716034 |
43c024f5d198128ac350efe4ad27c6ac469c47db | 4,771 | py | Python | myslice/lib/remote.py | loicbaron/myslice2 | 32af9462cc9e5654a6e3036978ae74b0a03a2698 | [
"MIT"
]
| null | null | null | myslice/lib/remote.py | loicbaron/myslice2 | 32af9462cc9e5654a6e3036978ae74b0a03a2698 | [
"MIT"
]
| 1 | 2020-06-02T12:30:07.000Z | 2020-06-02T12:30:07.000Z | myslice/lib/remote.py | loicbaron/myslice2 | 32af9462cc9e5654a6e3036978ae74b0a03a2698 | [
"MIT"
]
| 1 | 2018-10-29T16:11:26.000Z | 2018-10-29T16:11:26.000Z | import socket
import select
import os
import glob
import hashlib
import paramiko
from paramiko.ssh_exception import BadAuthenticationType, BadHostKeyException, AuthenticationException, SSHException
# static atm
username = 'root'
rsa_private_key = "/Users/moray/.ssh/planetlab_root_ssh_key.rsa"
remote_dir = "/root/.myslice"
local_dir = os.path.realpath(os.path.dirname(__file__) + '/../scripts')
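# Helpers to push the local scripts directory to <remote_dir> on a node (skipping
# files whose md5 already matches) and to run commands or deployed scripts there
# over SSH as root.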
def setup(hostname):
result = { "status" : False, "message" : None }
try:
pkey = paramiko.RSAKey.from_private_key_file(rsa_private_key)
except Exception as e:
#print 'Failed loading' % (rsa_private_key, e)
result["message"] = 'Failed loading' % (rsa_private_key, e)
return result
try:
transport = paramiko.Transport((hostname, 22))
except SSHException as e:
# Transport setup error
result['message'] = 'Failed SSH connection (%s)' % (e)
return result
except Exception as e:
result['message'] = 'Transport error (%s)' % (e)
return result
try:
transport.start_client()
except SSHException as e:
# if negotiation fails (and no event was passed in)
result['message'] = 'Failed SSH negotiation (%s)' % (e)
return result
try:
transport.auth_publickey(username, pkey)
except BadAuthenticationType as e:
# if public-key authentication isn't allowed by the server for this user (and no event was passed in)
result['message'] = 'Failed public-key authentication (%s)' % (e)
return result
except AuthenticationException as e:
# if the authentication failed (and no event was passed in)
result['message'] = 'Failed authentication (%s)' % (e)
return result
except SSHException as e:
# if there was a network error
result['message'] = 'Network error (%s)' % (e)
return result
sftp = paramiko.SFTPClient.from_transport(transport)
try:
sftp.chdir(remote_dir) # Test if remote_path exists
except IOError:
sftp.mkdir(remote_dir) # Create remote_path
sftp.chdir(remote_dir)
pass
for file_name in glob.glob(local_dir + '/*.*'):
local_file = os.path.join(local_dir, file_name)
remote_file = remote_dir + '/' + os.path.basename(file_name)
# check if remote file exists
try:
if sftp.stat(remote_file):
local_file_data = open(local_file, "rb").read()
remote_file_data = sftp.open(remote_file).read()
md1 = hashlib.md5(local_file_data).digest()
md2 = hashlib.md5(remote_file_data).digest()
if md1 == md2:
pass
#print "UNCHANGED:", os.path.basename(file_name)
else:
#print "MODIFIED:", os.path.basename(file_name)
sftp.put(local_file, remote_file)
except:
#print "NEW: ", os.path.basename(file_name)
sftp.put(local_file, remote_file)
            sftp.chmod(remote_file, 0o755)
sftp.close()
result['status'] = True
result['message'] = 'Setup complete'
return result
def connect(hostname):
'''
Try to connect to remote host
'''
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname=hostname, username="root", key_filename=rsa_private_key)
except BadHostKeyException as e:
print(e)
raise
except AuthenticationException as e:
print(e)
raise
except SSHException as e:
print(e)
raise
except socket.error as e:
print(e)
raise
except IOError as e:
print(e)
raise
return ssh
def execute(hostname, command):
result = ''
ssh = connect(hostname)
# Send the command (non-blocking)
stdin, stdout, stderr = ssh.exec_command(command)
# Wait for the command to terminate
while not stdout.channel.exit_status_ready():
# Only print data if there is data to read in the channel
if stdout.channel.recv_ready():
rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
if len(rl) > 0:
# Print data from stdout
result += stdout.channel.recv(1024)
ssh.close()
return result.strip()
def script(hostname, script):
'''
Executes a script on the remote node.
Scripts will return a json formatted string with result and information
'''
result = execute(hostname, remote_dir + "/" + script)
return result
if __name__ == '__main__':
node = 'mimas.ipv6.lip6.fr'
setup(node)
r = script(node, 'networks.sh')
    print(r) | 30.388535 | 116 | 0.617481 |
66d0a89b7912adaa6ac8de189bb5d51e3349b9a2 | 17,093 | py | Python | haproxy/tests/test_unit.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
]
| null | null | null | haproxy/tests/test_unit.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
]
| null | null | null | haproxy/tests/test_unit.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
]
| null | null | null | import copy
import os
from collections import defaultdict
import mock
from . import common
BASE_CONFIG = {'url': 'http://localhost/admin?stats', 'collect_status_metrics': True, 'enable_service_check': True}
def _assert_agg_statuses(
aggregator, count_status_by_service=True, collate_status_tags_per_host=False, disable_service_tag=False
):
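    # Helper: check the aggregate `haproxy.count_per_status` metric against the
    # expectation set that matches the tagging options; when statuses are collated
    # per host, no aggregate metric should be emitted at all.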
if disable_service_tag:
expected_statuses = common.AGG_STATUSES_BY_SERVICE_DISABLE_SERVICE_TAG
else:
expected_statuses = common.AGG_STATUSES_BY_SERVICE if count_status_by_service else common.AGG_STATUSES
for tags, value in expected_statuses:
if collate_status_tags_per_host:
# Assert that no aggregate statuses are sent
aggregator.assert_metric('haproxy.count_per_status', tags=tags, count=0)
else:
aggregator.assert_metric('haproxy.count_per_status', value=value, tags=tags)
def test_count_per_status_agg_only(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
# with count_status_by_service set to False
config['count_status_by_service'] = False
haproxy_check = check(config)
haproxy_check.check(config)
aggregator.assert_metric('haproxy.count_per_status', value=2, tags=['status:open'])
aggregator.assert_metric('haproxy.count_per_status', value=4, tags=['status:up'])
aggregator.assert_metric('haproxy.count_per_status', value=2, tags=['status:down'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:maint'])
aggregator.assert_metric('haproxy.count_per_status', value=0, tags=['status:nolb'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:no_check'])
_assert_agg_statuses(aggregator, count_status_by_service=False)
def test_count_per_status_by_service(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
haproxy_check = check(config)
haproxy_check.check(config)
aggregator.assert_metric(
'haproxy.count_per_status', value=1, tags=['status:open', 'service:a', 'haproxy_service:a']
)
aggregator.assert_metric('haproxy.count_per_status', value=3, tags=['status:up', 'service:b', 'haproxy_service:b'])
aggregator.assert_metric(
'haproxy.count_per_status', value=1, tags=['status:open', 'service:b', 'haproxy_service:b']
)
aggregator.assert_metric(
'haproxy.count_per_status', value=1, tags=['status:down', 'service:b', 'haproxy_service:b']
)
aggregator.assert_metric(
'haproxy.count_per_status', value=1, tags=['status:maint', 'service:b', 'haproxy_service:b']
)
tags = [
'status:up',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'status:down',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'status:no_check',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
_assert_agg_statuses(aggregator)
def test_count_per_status_by_service_and_host(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
config['collect_status_metrics_by_host'] = True
haproxy_check = check(config)
haproxy_check.check(config)
tags = ['backend:FRONTEND', 'status:open', 'service:a', 'haproxy_service:a']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:FRONTEND', 'status:open', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
for backend in ['i-1', 'i-2', 'i-3']:
tags = ['backend:%s' % backend, 'status:up', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-4', 'status:down', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-5', 'status:maint', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-1',
'status:up',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-2',
'status:down',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-3',
'status:no_check',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
_assert_agg_statuses(aggregator)
def test_count_per_status_by_service_and_collate_per_host(aggregator, check, haproxy_mock):
haproxy_check = check(BASE_CONFIG)
config = copy.deepcopy(BASE_CONFIG)
config['collect_status_metrics_by_host'] = True
config['collate_status_tags_per_host'] = True
haproxy_check.check(config)
tags = ['backend:FRONTEND', 'status:available', 'service:a', 'haproxy_service:a']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:FRONTEND', 'status:available', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
for backend in ['i-1', 'i-2', 'i-3']:
tags = ['backend:%s' % backend, 'status:available', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-4', 'status:unavailable', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-5', 'status:unavailable', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-1',
'status:available',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-2',
'status:unavailable',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-3',
'status:unavailable',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
_assert_agg_statuses(aggregator, collate_status_tags_per_host=True)
def test_count_per_status_by_service_and_collate_per_host_evil(aggregator, check, haproxy_mock_evil):
haproxy_check = check(BASE_CONFIG)
config = copy.deepcopy(BASE_CONFIG)
config['collect_status_metrics_by_host'] = True
config['collate_status_tags_per_host'] = True
haproxy_check.check(config)
tags = ['backend:FRONTEND', 'status:available', 'service:a', 'haproxy_service:a']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:FRONTEND', 'status:available', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
for backend in ['i-1', 'i-2', 'i-3']:
tags = ['backend:%s' % backend, 'status:available', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-4', 'status:unavailable', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = ['backend:i-5', 'status:unavailable', 'service:b', 'haproxy_service:b']
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-1',
'status:available',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-2',
'status:unavailable',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'backend:i-3',
'status:unavailable',
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
_assert_agg_statuses(aggregator, collate_status_tags_per_host=True)
def test_count_per_status_collate_per_host(aggregator, check, haproxy_mock):
haproxy_check = check(BASE_CONFIG)
config = copy.deepcopy(BASE_CONFIG)
config['collect_status_metrics_by_host'] = True
config['collate_status_tags_per_host'] = True
config['count_status_by_service'] = False
haproxy_check.check(config)
aggregator.assert_metric('haproxy.count_per_status', value=2, tags=['backend:FRONTEND', 'status:available'])
aggregator.assert_metric('haproxy.count_per_status', value=2, tags=['backend:i-1', 'status:available'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:available'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-2', 'status:unavailable'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:available'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-3', 'status:unavailable'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-4', 'status:unavailable'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['backend:i-5', 'status:unavailable'])
_assert_agg_statuses(aggregator, count_status_by_service=False, collate_status_tags_per_host=True)
# This mock is only useful to make the first `run_check` run w/o errors
# (which in turn is useful only to initialize the check)
def test_count_hosts_statuses(aggregator, check, haproxy_mock):
haproxy_check = check(BASE_CONFIG)
haproxy_check.check(BASE_CONFIG)
filepath = os.path.join(common.HERE, 'fixtures', 'statuses_mock')
with open(filepath, 'r') as f:
data = f.read()
data = data.split('\n')
# per service
haproxy_check._process_data(data, True, False, collect_status_metrics=True, collect_status_metrics_by_host=False)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'up')] = 3
expected_hosts_statuses[('b', 'BACKEND', 'down')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'maint')] = 1
expected_hosts_statuses[('a', 'FRONTEND', 'open')] = 1
assert haproxy_check.hosts_statuses == expected_hosts_statuses
# backend hosts
agg_statuses = haproxy_check._process_backend_hosts_metric(expected_hosts_statuses)
expected_agg_statuses = {'b': {'available': 3, 'unavailable': 2}}
assert expected_agg_statuses == dict(agg_statuses)
# with process_events set to True
haproxy_check._process_data(data, True, True, collect_status_metrics=True, collect_status_metrics_by_host=False)
assert haproxy_check.hosts_statuses == expected_hosts_statuses
# per host
haproxy_check._process_data(data, True, False, collect_status_metrics=True, collect_status_metrics_by_host=True)
expected_hosts_statuses = defaultdict(int)
expected_hosts_statuses[('b', 'FRONTEND', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('a', 'FRONTEND', 'FRONTEND', 'open')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'i-1', 'up')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'i-2', 'up')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'i-3', 'up')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'i-4', 'down')] = 1
expected_hosts_statuses[('b', 'BACKEND', 'i-5', 'maint')] = 1
assert haproxy_check.hosts_statuses == expected_hosts_statuses
haproxy_check._process_data(data, True, True, collect_status_metrics=True, collect_status_metrics_by_host=True)
    assert haproxy_check.hosts_statuses == expected_hosts_statuses
def test_optional_tags(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
config['tags'] = ['new-tag', 'my:new:tag']
haproxy_check = check(BASE_CONFIG)
haproxy_check.check(config)
aggregator.assert_metric_has_tag('haproxy.backend.session.current', 'new-tag')
aggregator.assert_metric_has_tag('haproxy.backend.session.current', 'my:new:tag')
aggregator.assert_metric_has_tag('haproxy.count_per_status', 'my:new:tag')
tags = ['service:a', 'haproxy_service:a', 'new-tag', 'my:new:tag', 'backend:BACKEND']
aggregator.assert_service_check('haproxy.backend_up', tags=tags)
def test_regex_tags(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
config['tags'] = ['region:infra']
# OS3 service: be_edge_http_sre-production_elk-kibana
config['tags_regex'] = r'be_(?P<security>edge_http|http)?_(?P<team>[a-z]+)\-(?P<env>[a-z]+)_(?P<app>.*)'
haproxy_check = check(BASE_CONFIG)
haproxy_check.check(config)
expected_tags = [
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
'type:BACKEND',
'instance_url:http://localhost/admin?stats',
'region:infra',
'security:edge_http',
'app:elk-kibana',
'env:production',
'team:sre',
'backend:BACKEND',
]
aggregator.assert_metric('haproxy.backend.session.current', value=1, count=1, tags=expected_tags)
aggregator.assert_metric_has_tag('haproxy.backend.session.current', 'app:elk-kibana', 1)
tags = [
'service:be_edge_http_sre-production_elk-kibana',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
'region:infra',
'security:edge_http',
'app:elk-kibana',
'env:production',
'team:sre',
'backend:i-1',
]
aggregator.assert_service_check('haproxy.backend_up', tags=tags)
def test_version_failure(aggregator, check, datadog_agent):
config = copy.deepcopy(BASE_CONFIG)
haproxy_check = check(config)
filepath = os.path.join(common.HERE, 'fixtures', 'mock_data')
with open(filepath, 'rb') as f:
data = f.read()
with mock.patch('requests.get') as m:
m.side_effect = [RuntimeError("Ooops"), mock.Mock(content=data)]
haproxy_check.check(config)
# Version failed, but we should have some metrics
aggregator.assert_metric(
'haproxy.count_per_status', value=1, tags=['status:open', 'service:a', 'haproxy_service:a']
)
# But no metadata
datadog_agent.assert_metadata_count(0)
def test_count_per_status_by_service_disable_service_tag(aggregator, check, haproxy_mock):
config = copy.deepcopy(BASE_CONFIG)
config['disable_legacy_service_tag'] = True
haproxy_check = check(config)
haproxy_check.check(config)
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:open', 'haproxy_service:a'])
aggregator.assert_metric('haproxy.count_per_status', value=3, tags=['status:up', 'haproxy_service:b'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:open', 'haproxy_service:b'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:down', 'haproxy_service:b'])
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=['status:maint', 'haproxy_service:b'])
tags = [
'status:up',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'status:down',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
tags = [
'status:no_check',
'haproxy_service:be_edge_http_sre-production_elk-kibana',
]
aggregator.assert_metric('haproxy.count_per_status', value=1, tags=tags)
_assert_agg_statuses(aggregator, disable_service_tag=True)
| 45.825737 | 119 | 0.713801 |
289e68bba99ec1fd4ae116072cfe517821955169 | 262 | py | Python | ScotlandPYard/spyengine/aimrx.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
]
| 6 | 2017-12-12T08:45:16.000Z | 2020-05-15T19:34:09.000Z | ScotlandPYard/spyengine/aimrx.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
]
| 139 | 2017-12-15T22:29:27.000Z | 2022-03-01T15:01:24.000Z | ScotlandPYard/spyengine/aimrx.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
]
| 2 | 2018-10-09T17:38:55.000Z | 2020-06-21T21:28:28.000Z | from .abstractmrx import AbstractMrX
class AIMrX(AbstractMrX):
def __init__(self, engine, num_players=4):
super().__init__(engine, is_ai=True, num_players=num_players)
def play_next(self):
"""TODO: insert AI logic here"""
pass
| 23.818182 | 69 | 0.675573 |
0e4f940ad9df3bb034da44485bcf1e245fe2ec85 | 138 | py | Python | foo.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
]
| null | null | null | foo.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
]
| null | null | null | foo.py | Ca2Patton/PythonStuff | 9d13f340296bcea41dfca87a4b36e445821703de | [
"Apache-2.0"
]
| null | null | null | #!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
def myfunction (**args):
print args
myfunction(foo="bar", happy="joy")
| 19.714286 | 62 | 0.731884 |
2b7e260bb339364d3d4da02ea18165121e73f297 | 8,761 | py | Python | api/api/settings.py | marzmehr/representation-grant-app | 7155e9193367b7d538381aa7a0745ed1cdfb1f62 | [
"Apache-2.0"
]
| null | null | null | api/api/settings.py | marzmehr/representation-grant-app | 7155e9193367b7d538381aa7a0745ed1cdfb1f62 | [
"Apache-2.0"
]
| null | null | null | api/api/settings.py | marzmehr/representation-grant-app | 7155e9193367b7d538381aa7a0745ed1cdfb1f62 | [
"Apache-2.0"
]
| null | null | null | """
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from corsheaders.defaults import default_headers
from core import database
from core.encryption import Encryptor
from core.utils.filter_logging_requests import filter_logging_requests
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The SECRET_KEY is provided via an environment variable in OpenShift
SECRET_KEY = os.getenv(
"DJANGO_SECRET_KEY",
# safe value used for development when DJANGO_SECRET_KEY might not be set
"75f46345-af2d-497d-a3ec-b6f05e5266f4",
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv("DJANGO_DEBUG", "True") == "True"
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
# Add your apps here to enable them
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework_swagger",
"auditable",
"core",
"api",
"corsheaders",
"oidc_rp"
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"oidc_rp.middleware.OIDCRefreshIDTokenMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"core.XForwardedForPortMiddleware"
]
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
SESSION_SAVE_EVERY_REQUEST = True
ROOT_URLCONF = "core.urls"
# CORS_URLS_REGEX = r"^/api/v1/.*$"
CORS_URLS_REGEX = r"^.*$"
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_HEADERS = default_headers + ("x-demo-login",)
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "api\\templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"oidc_rp.context_processors.oidc",
]
},
}
]
WSGI_APPLICATION = "wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {"default": database.config()}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
)
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
AUTH_USER_MODEL = "api.User"
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"oidc_rp.backends.OIDCAuthBackend",
)
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = os.getenv("WEB_BASE_HREF", "/representation-grant/") + "/api/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"filter_logging_requests":
{
"()": "django.utils.log.CallbackFilter",
"callback": filter_logging_requests
}},
"formatters": {
"verbose": {
"format": (
"%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
)
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"console_handler": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "verbose",
}
},
"loggers": {
"api": {"handlers": ["console_handler"], "level": "DEBUG", "propagate": False},
"django": {
"handlers": ["console_handler"],
"level": "INFO",
"propagate": False,
},
"django.request": {
"handlers": ["console_handler"],
"level": "INFO",
"propagate": False,
},
},
"root": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "INFO")).upper(),
"propagate": False,
},
}
OIDC_ENABLED = False
# Settings for django-oidc-rp
OIDC_RP_PROVIDER_ENDPOINT = os.getenv(
"OIDC_RP_PROVIDER_ENDPOINT",
# FIXME no default here
"https://dev.oidc.gov.bc.ca/auth/realms/tz0e228w",
)
if OIDC_RP_PROVIDER_ENDPOINT:
OIDC_RP_PROVIDER_AUTHORIZATION_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/protocol/openid-connect/auth"
)
OIDC_RP_PROVIDER_TOKEN_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/protocol/openid-connect/token"
)
OIDC_RP_PROVIDER_JWKS_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/protocol/openid-connect/certs"
)
OIDC_RP_PROVIDER_USERINFO_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/protocol/openid-connect/userinfo"
)
OIDC_RP_PROVIDER_END_SESSION_ENDPOINT = (
f"{OIDC_RP_PROVIDER_ENDPOINT}/protocol/openid-connect/logout"
)
OIDC_RP_CLIENT_ID = os.getenv("OIDC_RP_CLIENT_ID", "representation-grant-api")
OIDC_RP_CLIENT_SECRET = os.getenv("OIDC_RP_CLIENT_SECRET")
OIDC_RP_PROVIDER_SIGNATURE_ALG = "RS256"
OIDC_RP_SCOPES = "openid profile email" # address phone
OIDC_RP_ID_TOKEN_INCLUDE_USERINFO = True
OIDC_RP_AUTHENTICATION_FAILURE_REDIRECT_URI = os.getenv("OIDC_RP_FAILURE_URI", "/representation-grant/")
OIDC_RP_USER_DETAILS_HANDLER = "core.auth.sync_keycloak_user"
OIDC_RP_AUTHENTICATION_REDIRECT_URI = (
os.getenv("OIDC_RP_AUTHENTICATION_REDIRECT_URI", "/representation-grant/")
)
OIDC_RP_KC_IDP_HINT = os.getenv("OIDC_RP_KC_IDP_HINT")
DRF_AUTH_CLASS = (
"oidc_rp.contrib.rest_framework.authentication.BearerTokenAuthentication"
)
OIDC_ENABLED = True
else:
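    # No OIDC provider endpoint configured: fall back to the demo authentication
    # class and drop the Keycloak authentication backend entirely.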
DRF_AUTH_CLASS = "core.auth.DemoAuth"
del AUTHENTICATION_BACKENDS
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
DRF_AUTH_CLASS,
"rest_framework.authentication.SessionAuthentication",
)
}
EFILING_APP_NAME = os.environ.get("EFILING_APP_NAME", "Representation Grant")
EFILING_COURT_LEVEL = os.environ.get("EFILING_COURT_LEVEL", "S")
EFILING_COURT_CLASS = os.environ.get("EFILING_COURT_CLASS", "P") # https://bcgov.github.io/jag-file-submission/#/data?id=court-classification
EFILING_COURT_DIVISION = os.environ.get("EFILING_COURT_DIVISION", "I")
EFILING_HUB_API_BASE_URL = os.environ.get("EFILING_HUB_API_BASE_URL", "")
EFILING_HUB_KEYCLOAK_BASE_URL = os.environ.get("EFILING_HUB_KEYCLOAK_BASE_URL", "")
EFILING_HUB_KEYCLOAK_CLIENT_ID = os.environ.get("EFILING_HUB_KEYCLOAK_CLIENT_ID", "")
EFILING_HUB_KEYCLOAK_REALM = os.environ.get("EFILING_HUB_KEYCLOAK_REALM", "")
EFILING_HUB_KEYCLOAK_SECRET = os.environ.get("EFILING_HUB_KEYCLOAK_SECRET", "")
ENCRYPTOR = Encryptor("DATA_SECURITY_KEY")
FORCE_SCRIPT_NAME = os.getenv("WEB_BASE_HREF", "/representation-grant/")
LOGOUT_REDIRECT_URL = os.getenv("LOGOUT_REDIRECT_URL", "/representation-grant/")
SITEMINDER_LOGOFF_URL = os.getenv("SITEMINDER_LOGOFF_URL", "https://logontest.gov.bc.ca/clp-cgi/logoff.cgi")
| 32.93609 | 142 | 0.701176 |
49611ae91fb652b3531e2c61f9e192724c52b6cf | 1,829 | py | Python | scrumate/core/deliverable/models.py | nahidsaikat/scrumate | 11a63f1cc361261a7023eceafc2a27e29561dca0 | [
"MIT"
]
| 1 | 2019-04-29T05:44:34.000Z | 2019-04-29T05:44:34.000Z | scrumate/core/deliverable/models.py | nahidsaikat/scrumate | 11a63f1cc361261a7023eceafc2a27e29561dca0 | [
"MIT"
]
| 27 | 2019-05-20T18:42:28.000Z | 2019-07-20T08:00:46.000Z | scrumate/core/deliverable/models.py | nahidsaikat/scrumate | 11a63f1cc361261a7023eceafc2a27e29561dca0 | [
"MIT"
]
| null | null | null | from django.db import models
from simple_history.models import HistoricalRecords
from scrumate.core.deliverable.choices import DeliverableStatus
from scrumate.core.task.models import Task
from scrumate.core.project.models import Project
from scrumate.core.sprint.models import Sprint
from scrumate.general.choices import Priority
from scrumate.people.models import Employee
class Deliverable(models.Model):
project = models.ForeignKey(Project, on_delete=models.SET_NULL, default=None, null=True, blank=True)
task = models.ForeignKey(Task, on_delete=models.SET_NULL, default=None, null=True)
name = models.CharField(max_length=100)
description = models.TextField(default='', null=True, blank=True)
sprint = models.ForeignKey(Sprint, on_delete=models.SET_NULL, default=None, null=True)
estimated_hour = models.DecimalField(verbose_name='Point', default=0.0, decimal_places=2, max_digits=15, null=True, blank=True)
actual_hour = models.DecimalField(verbose_name='Actual Point', default=0.0, decimal_places=2, max_digits=15, null=True, blank=True)
priority = models.IntegerField(choices=Priority.choices, default=Priority.High, null=True, blank=True)
assignee = models.ForeignKey(Employee, on_delete=models.SET_NULL, default=None, null=True)
assign_date = models.DateField(default=None, null=True, blank=True)
release_date = models.DateField(default=None, null=True, blank=True)
status = models.IntegerField(choices=DeliverableStatus.choices, default=DeliverableStatus.Pending, null=True, blank=True)
history = HistoricalRecords()
def __str__(self):
return self.name
class Meta:
permissions = (
("update_deliverable_status", "Can Update Status of Deliverable"),
("deliverable_history", "Can See Deliverable History"),
) | 52.257143 | 135 | 0.762712 |
d4dce99898990c8328f4b60d00c1d7b05c635ca0 | 40 | py | Python | index.py | seyedmohammadhosseini/blockchain | 656f26a84c70e9bd9c041e0a9494454d70293cf1 | [
"MIT"
]
| null | null | null | index.py | seyedmohammadhosseini/blockchain | 656f26a84c70e9bd9c041e0a9494454d70293cf1 | [
"MIT"
]
| null | null | null | index.py | seyedmohammadhosseini/blockchain | 656f26a84c70e9bd9c041e0a9494454d70293cf1 | [
"MIT"
]
| null | null | null | from system import startup
startup.run() | 20 | 26 | 0.825 |
76b4898117480e31a640e64a75bddef47e10282e | 446 | py | Python | src/pretalx/orga/templatetags/orga_edit_link.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
]
| 418 | 2017-10-05T05:52:49.000Z | 2022-03-24T09:50:06.000Z | src/pretalx/orga/templatetags/orga_edit_link.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
]
| 1,049 | 2017-09-16T09:34:55.000Z | 2022-03-23T16:13:04.000Z | src/pretalx/orga/templatetags/orga_edit_link.py | hrchu/pretalx | cd7e5525f80c7290d9650065b4cf4f085032adfc | [
"Apache-2.0"
]
| 155 | 2017-10-16T18:32:01.000Z | 2022-03-15T12:48:33.000Z | from django import template
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _
register = template.Library()
@register.simple_tag()
def orga_edit_link(url, target=None):
if target:
url = f"{url}#{target}"
    title = _("Edit")
    result = f'<a href="{url}" class="btn btn-xs btn-outline-primary orga-edit-link ml-auto" title="{title}"><i class="fa fa-pencil"></i></a>'
return mark_safe(result)
| 31.857143 | 146 | 0.706278 |
741ad7355515195cea3829124a005bdf4b9f661e | 5,739 | py | Python | openstates/openstates-master/openstates/fl/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | openstates/openstates-master/openstates/fl/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | openstates/openstates-master/openstates/fl/legislators.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
]
| null | null | null | import re
import urlparse
from billy.scrape.legislators import LegislatorScraper, Legislator
import lxml.html
class FLLegislatorScraper(LegislatorScraper):
jurisdiction = 'fl'
latest_only = True
def scrape(self, chamber, term):
if chamber == 'upper':
self.scrape_senators(term)
else:
self.scrape_reps(term)
def scrape_sen_offices(self, leg, leg_url):
doc = lxml.html.fromstring(self.get(leg_url).text)
email = doc.xpath('//a[contains(@href, "mailto:")]')[0].get('href').split(':')[-1]
PHONE_RE = r'\(\d{3}\)\s\d{3}\-\d{4}'
offices = doc.xpath('//h4[contains(text(), "Office")]')
for office in offices:
(name, ) = office.xpath('text()')
if name == "Tallahassee Office":
type_ = 'capitol'
else:
type_ = 'district'
address_lines = [
x.strip() for x in
office.xpath('following-sibling::div[1]/text()')
if x.strip()
]
if re.search(PHONE_RE, address_lines[-1]):
phone = address_lines.pop()
else:
phone = None
if re.search(r'(?i)open\s+\w+day', address_lines[0]):
address_lines = address_lines[1: ]
assert ", FL" in address_lines[-1]
address = "\n".join(address_lines)
address = re.sub(r'\s{2,}', " ", address)
leg.add_office(
type=type_,
name=name,
address=address,
phone=phone,
email=email if type_ == 'capitol' else None
)
def scrape_senators(self, term):
url = "http://www.flsenate.gov/Senators/"
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
for link in page.xpath("//a[contains(@href, 'Senators/s')]"):
name = " ".join(link.xpath('.//text()'))
name = re.sub(r'\s+', " ", name).replace(" ,", ",").strip()
leg_url = link.get('href')
leg_doc = lxml.html.fromstring(self.get(leg_url).text)
leg_doc.make_links_absolute(leg_url)
if 'Vacant' in name:
continue
district = link.xpath("string(../../td[1])")
party = link.xpath("string(../../td[2])")
# for consistency
if party == 'Democrat':
party = 'Democratic'
photo_url = leg_doc.xpath('//div[@id="sidebar"]//img/@src').pop()
leg = Legislator(term, 'upper', district, name,
party=party, photo_url=photo_url, url=leg_url)
leg.add_source(url)
leg.add_source(leg_url)
self.scrape_sen_offices(leg, leg_url)
self.save_legislator(leg)
def scrape_rep_office(self, leg, doc, name):
pieces = [x.tail.strip() for x in
doc.xpath('//strong[text()="%s"]/following-sibling::br' %
name)]
if not pieces:
return
        address = []
        phone = None  # guard against pages with no 'Phone:' line
for piece in pieces:
if piece.startswith('Phone:'):
# 'Phone: \r\n (303) 222-2222'
if re.search(r'\d+', piece):
phone = piece.split(None, 1)[1]
else:
phone = None
else:
piece = re.sub(r'\s+', ' ', piece)
address.append(piece)
office = dict(name=name, address='\n'.join(address))
# Phone
if phone is not None:
office['phone'] = phone
# Type
if 'Capitol' in name:
office['type'] = 'capitol'
elif 'District' in name:
office['type'] = 'district'
leg.add_office(**office)
def scrape_reps(self, term):
url = ("http://www.flhouse.gov/Sections/Representatives/"
"representatives.aspx")
page = self.get(url).text
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
for div in page.xpath('//div[@class="rep_listing1"]'):
link = div.xpath('.//div[@class="rep_style"]/a')[0]
name = link.text_content().strip()
term_details = div.xpath(
'.//div[@class="term_style"]')[0].text_content()
if 'Resigned' in term_details:
continue
party = div.xpath('.//div[@class="party_style"]/text()')[0].strip()
if party == 'D':
party = 'Democratic'
elif party == 'R':
party = 'Republican'
else:
raise NotImplementedError(
"Unknown party found: {}".format(party))
district = div.xpath(
'.//div[@class="district_style"]/text()')[0].strip()
leg_url = link.get('href')
split_url = urlparse.urlsplit(leg_url)
member_id = urlparse.parse_qs(split_url.query)['MemberId'][0]
photo_url = ("http://www.flhouse.gov/FileStores/Web/"
"Imaging/Member/%s.jpg" % member_id)
leg = Legislator(term, 'lower', district, name,
party=party, photo_url=photo_url, url=leg_url)
# offices
leg_doc = lxml.html.fromstring(self.get(leg_url).text)
self.scrape_rep_office(leg, leg_doc, 'Capitol Office')
self.scrape_rep_office(leg, leg_doc, 'District Office')
leg.add_source(url)
leg.add_source(leg_url)
self.save_legislator(leg)
| 34.160714 | 90 | 0.500958 |
4fe3be244e850e895a2e634af2ac99f00ac6f470 | 1,101 | py | Python | watchout/Demo1/src/robot_decision/scripts/listener.py | GuoPingPan/Watchout | f1abf050408f429d3e3ae46c41f79714248b4010 | [
"Apache-2.0"
]
| null | null | null | watchout/Demo1/src/robot_decision/scripts/listener.py | GuoPingPan/Watchout | f1abf050408f429d3e3ae46c41f79714248b4010 | [
"Apache-2.0"
]
| null | null | null | watchout/Demo1/src/robot_decision/scripts/listener.py | GuoPingPan/Watchout | f1abf050408f429d3e3ae46c41f79714248b4010 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
import threading
msg1 = "hellow"
msg2 = "hellow"
def callback1(msg):
print("callback1")
print(msg)
msg1 = msg
def callback2(msg):
print("callback2")
print(msg)
msg2 = msg
def subone():
print("1")
sub1 = rospy.Subscriber("/pub1",String,callback1,queue_size=1)
rospy.spin()
def subtwo():
print("1")
sub2 = rospy.Subscriber("/pub2",String,callback2,queue_size=1)
rospy.spin()
def main():
rospy.init_node("listener")
thread1 = threading.Thread(target=subone)
thread2 = threading.Thread(target=subtwo)
thread1.start()
thread2.start()
# while(1):
# print("main")
# print(msg1)
# print(msg2)
def main2():
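    # Simpler alternative to the threaded subscribers above: block until a single
    # message arrives on each topic, then print both.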
rospy.init_node("listener")
data1 = rospy.wait_for_message("/pub1",String,timeout=None)
data2 = rospy.wait_for_message("/pub2",String,timeout=None)
print(data1)
print(data2)
if __name__=="__main__":
try:
main2()
except rospy.ROSException:
print("error")
pass | 18.04918 | 66 | 0.620345 |
7e4290117025d869cdb8ea1ae3bce44745024a1e | 4,471 | py | Python | projects/ISTR/istr/dataset_mapper.py | braindevices/ISTR | 520b0d410ba8be5dbf53971d962b0bfcf072a7c0 | [
"Apache-2.0"
]
| 171 | 2021-05-04T02:44:01.000Z | 2022-03-28T09:58:29.000Z | projects/ISTR/istr/dataset_mapper.py | braindevices/ISTR | 520b0d410ba8be5dbf53971d962b0bfcf072a7c0 | [
"Apache-2.0"
]
| 10 | 2021-05-09T16:04:43.000Z | 2021-12-03T01:21:44.000Z | projects/ISTR/istr/dataset_mapper.py | braindevices/ISTR | 520b0d410ba8be5dbf53971d962b0bfcf072a7c0 | [
"Apache-2.0"
]
| 21 | 2021-05-04T02:47:57.000Z | 2022-01-06T07:34:24.000Z | import copy
import logging
import numpy as np
import torch
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data.transforms import TransformGen
__all__ = ["ISTRDatasetMapper"]
def build_transform_gen(cfg, is_train):
"""
Create a list of :class:`TransformGen` from config.
Returns:
list[TransformGen]
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
logger = logging.getLogger(__name__)
tfm_gens = []
if is_train:
tfm_gens.append(T.RandomFlip())
tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
if is_train:
logger.info("TransformGens used in training: " + str(tfm_gens))
return tfm_gens
@torch.no_grad()
class ISTRDatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
and map it into a format used by SparseRCNN.
The callable currently does the following:
1. Read the image from "file_name"
2. Applies geometric transforms to the image and annotation
3. Find and applies suitable cropping to the image and annotation
4. Prepare image and annotation to Tensors
"""
def __init__(self, cfg, is_train=True):
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = [
T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
]
else:
self.crop_gen = None
self.tfm_gens = build_transform_gen(cfg, is_train)
logging.getLogger(__name__).info(
"Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
)
self.img_format = cfg.INPUT.FORMAT
self.is_train = is_train
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
utils.check_image_size(dataset_dict, image)
if self.crop_gen is None:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
if np.random.rand() > 0.5:
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
else:
image, transforms = T.apply_transform_gens(
self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
)
image_shape = image.shape[:2] # h, w
# print(image_shape)
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
dataset_dict.pop("annotations", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
# anno.pop("segmentation", None)
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(obj, transforms, image_shape)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(annos, image_shape)
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
| 36.647541 | 111 | 0.639454 |
de01461afcef863f7464a5a0d09d974cfda5f0ba | 3,115 | py | Python | backend/sales/models.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | backend/sales/models.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | backend/sales/models.py | cbreezy623/modabella | b68bcc8aca903887d31489baae609ed70fe3dba7 | [
"Apache-2.0"
]
| null | null | null | from django.db import models
from django.utils.translation import gettext_lazy as _
from appointments.models import Appointment, SchedulerAppointment
from products.models import Product
from services.models import Service, Modifier
from decimal import *
class PaymentMethod(models.TextChoices):
CASH = 'cash', _('Cash')
CARD = 'card', _('Credit/Debit Card')
CHECK = 'check', _('Check')
SPLIT = 'split', _('Split')
# Create your models here.
class PSale(models.Model):
appointment = models.ForeignKey(SchedulerAppointment, on_delete=models.PROTECT)
product = models.ForeignKey(Product, on_delete=models.PROTECT)
quantity = models.PositiveIntegerField()
unitSalePrice = models.DecimalField(max_digits=10, decimal_places=2)
payment = models.CharField(
max_length = 5,
choices=PaymentMethod.choices,
default=PaymentMethod.CASH
)
    # Build the tax rate from a string so the Decimal is exact; a float literal
    # would carry binary rounding error into every per-line tax calculation.
    TAX = Decimal('0.055')
PRECISION = 7
@property
def unitTax(self):
return round(self.TAX * Decimal(self.unitSalePrice), self.PRECISION)
@property
def subtotal(self):
subtotal = Decimal(self.quantity) * Decimal(self.unitSalePrice)
subtotal = round(subtotal, self.PRECISION)
return '{:.2f}'.format(subtotal)
@property
def tax(self):
tax = Decimal(self.quantity) * self.TAX * Decimal(self.unitSalePrice)
tax = round(tax, self.PRECISION)
return tax
@property
def total(self):
subtotal = Decimal(self.quantity) * Decimal(self.unitSalePrice)
subtotal = round(subtotal, self.PRECISION)
return round(subtotal + self.quantity * self.unitTax, self.PRECISION)
@property
def name(self):
return self.product.name
def __str__(self):
return 'Product Sale: (' + str(self.quantity) + 'x)' + str(self.product) + ' ' + str(self.appointment)
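# Worked example (added illustration, values are hypothetical): for a PSale with
# quantity=3 and unitSalePrice=Decimal('19.99'),
#   unitTax  = round(TAX * 19.99, 7)            -> Decimal('1.0994500')
#   subtotal = 3 * 19.99                        -> '59.97'
#   tax      = round(3 * TAX * 19.99, 7)        -> Decimal('3.2983500')
#   total    = round(59.97 + 3 * 1.0994500, 7)  -> Decimal('63.2683500')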
class SSale(models.Model):
appointment = models.ForeignKey(SchedulerAppointment, on_delete=models.PROTECT)
service = models.ForeignKey(Service, on_delete=models.PROTECT)
salePrice = models.DecimalField(max_digits=10, decimal_places=2)
payment = models.CharField(
max_length = 5,
choices=PaymentMethod.choices,
default=PaymentMethod.CASH
)
@property
def name(self):
return self.service.name
def __str__(self):
return str(self.service) + ' ' + str(self.appointment)
class SMSale(models.Model):
service_sale = models.ForeignKey(SSale, on_delete=models.CASCADE)
modifier = models.ForeignKey(Modifier, on_delete=models.PROTECT)
salePrice = models.DecimalField(max_digits=10, decimal_places=2)
def __str__(self):
return 'SMSale: ' + str(self.service_sale) + str(self.modifier)
class Tips(models.Model):
appointment = models.ForeignKey(SchedulerAppointment, on_delete=models.PROTECT)
amount = models.DecimalField(max_digits=10, decimal_places=2)
payment = models.CharField(
max_length = 5,
choices=PaymentMethod.choices,
default=PaymentMethod.CASH
)
    def __str__(self):
        return 'Tip: $' + str(self.amount)
| 33.138298 | 110 | 0.689567 |
52817143cc7a0213abd64709958136ec45543a0e | 18,040 | py | Python | src/m4_sequences.py | Zdanavz/20-Exam3Practice | 8ead8bf864e757cb78c2a107971bd53de67a63ad | [
"MIT"
]
| null | null | null | src/m4_sequences.py | Zdanavz/20-Exam3Practice | 8ead8bf864e757cb78c2a107971bd53de67a63ad | [
"MIT"
]
| null | null | null | src/m4_sequences.py | Zdanavz/20-Exam3Practice | 8ead8bf864e757cb78c2a107971bd53de67a63ad | [
"MIT"
]
| null | null | null | """
PRACTICE Exam 3.
This problem provides practice at:
*** SEQUENCES. ***
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Zack Z.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
###############################################################################
import simple_testing as st
import math
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
run_test_practice_problem4a()
run_test_practice_problem4b()
run_test_practice_problem4c()
run_test_practice_problem4d()
def is_prime(n):
"""
What comes in: An integer.
What goes out: Returns True if the given integer is prime.
Returns False if the given integer is NOT prime.
Side effects: None.
Examples:
This function returns True or False, depending on whether
the given integer is prime or not. Since the smallest prime is 2,
this function returns False on all integers < 2.
It returns True on 2, 3, 5, 7, and other primes.
Note: The algorithm used here is simple and clear but slow.
Type hints:
:type n: int
"""
if n < 2:
return False
for k in range(2, int(math.sqrt(n) + 0.1) + 1):
if n % k == 0:
return False
return True
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TO DO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Students: Some of the testing code below uses SimpleTestCase objects,
# from the imported simple_testing (st) module.
# See details in the test code below.
# -----------------------------------------------------------------------------
def run_test_practice_problem4a():
""" Tests the practice_problem4a function. """
# -------------------------------------------------------------------------
# 4 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem4v((9, 33, 8, 8, 0, 4, 4, 8))
# and compare the returned value against [2, 5] (the correct answer).
# -------------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4a,
[(9, 33, 8, 8, 0, 4, 4, 8)],
[2, 5]),
st.SimpleTestCase(practice_problem4a,
[(9, 9, 9, 9, 0, 9, 9, 9)],
[0, 1, 2, 5, 6]),
st.SimpleTestCase(practice_problem4a,
[(4, 5, 4, 5, 4, 5, 4)],
[]),
st.SimpleTestCase(practice_problem4a,
['abbabbb'],
[1, 4, 5]),
]
# Run the 4 tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem4a', tests)
def practice_problem4a(sequence):
"""
What comes in: A non-empty sequence.
What goes out: Returns a list of integers,
where the integers are the places (indices)
where an item in the given sequence appears twice in a row.
Side effects: None.
Examples:
Given sequence (9, 33, 8, 8, 0, 4, 4, 8)
-- this function returns [2, 5]
since 8 appears twice in a row starting at index 2
and 4 appears twice in a row starting at index 5
Given sequence (9, 9, 9, 9, 0, 9, 9, 9)
-- this function returns [0, 1, 2, 5, 6]
Given sequence (4, 5, 4, 5, 4, 5, 4)
-- this function returns []
Given sequence 'abbabbb'
-- this function returns [1, 4, 5]
Type hints:
:type sequence: list | tuple | string
"""
###########################################################################
# Done: 2. Implement and test this function.
# The testing code is already written for you (above).
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
###########################################################################
    # Scan adjacent pairs and record each index whose item equals its successor.
    matches = []
    for k in range(len(sequence) - 1):
        if sequence[k] == sequence[k + 1]:
            matches = matches + [k]
    return matches
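# Added illustrative check (not part of the original exam file): a quick sanity
# test of practice_problem4a on the first example from its docstring.
assert practice_problem4a((9, 33, 8, 8, 0, 4, 4, 8)) == [2, 5]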
def run_test_practice_problem4b():
""" Tests the practice_problem4b function. """
# -------------------------------------------------------------------------
# 5 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem4b((12, 33, 18, 9, 13, 3, 9, 20, 19, 20))
# and compare the returned value against 19 (the correct answer).
# -------------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4b,
[(12, 33, 18, 9, 13, 3, 9, 20, 19, 20)],
19),
st.SimpleTestCase(practice_problem4b,
[(3, 12, 10, 8, 8, 9, 8, 11)],
10),
st.SimpleTestCase(practice_problem4b,
[(-9999999999, 8888888888)],
- 9999999999),
st.SimpleTestCase(practice_problem4b,
[(8888888888, -9999999999)],
8888888888),
st.SimpleTestCase(practice_problem4b,
[(-77, 20000, -33, 40000, -55,
60000, -11)],
- 11),
]
# -------------------------------------------------------------------------
# Run the 5 tests in the tests list constructed above.
# -------------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem4b', tests)
def practice_problem4b(sequence):
"""
What comes in:
A sequence of numbers, where the length of the sequence >= 2.
What goes out:
Returns the largest of the numbers at EVEN INDICES of the sequence.
Side effects: None.
Examples:
If the sequence is:
(12, 33, 18, 9, 13, 3, 99, 20, 19, 20)
then the largest of the numbers at EVEN indices is the largest of
12 18 13 99 19 which is 99.
So the function returns 99 in this example.
Type hints:
:type sequence: (list | tuple) of (float | int)
"""
# -------------------------------------------------------------------------
# Done: 3. Implement and test this function.
# The testing code is already written for you (above).
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 10 minutes.
###########################################################################
test=sequence[0]
for k in range(len(sequence)):
if k%2==0 and sequence[k]>test:
test=sequence[k]
return test
def run_test_practice_problem4c():
""" Tests the practice_problem4c function. """
# -------------------------------------------------------------------------
    # 4 tests. They use the imported simple_testing (st) module.
# -------------------------------------------------------------------------
argument1 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(1, 13),
rg.Point(10, 40),
rg.Point(13, 5),
rg.Point(10, 3),
rg.Point(3, 7),
rg.Point(2, 2))
answer1 = rg.Point(5, 13)
argument2 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(27, 13),
rg.Point(10, 40),
rg.Point(13, 4),
rg.Point(1, 1),
rg.Point(3, 7))
answer2 = rg.Point(7, 3)
argument3 = (rg.Point(5, 2),
rg.Point(20, 20),
rg.Point(27, 13),
rg.Point(10, 40),
rg.Point(13, 4),
rg.Point(1, 1),
rg.Point(3, 7))
answer3 = rg.Point(2, 5)
argument4 = (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(27, 13))
answer4 = 'Not found'
tests = [st.SimpleTestCase(practice_problem4c, [argument1], answer1),
st.SimpleTestCase(practice_problem4c, [argument2], answer2),
st.SimpleTestCase(practice_problem4c, [argument3], answer3),
st.SimpleTestCase(practice_problem4c, [argument4], answer4),
]
# -------------------------------------------------------------------------
    # Run the 4 tests in the tests list constructed above.
# -------------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem4c', tests)
if argument1[4] != answer1:
print()
print('*** WARNING, WARNING, WARNING ***')
print('If your code DID pass the above tests')
print('but you get this message,')
print('then you have missed an important concept about mutation.')
print(' *** SEE YOUR INSTRUCTOR for an important explanation!')
print()
def practice_problem4c(points):
"""
What comes in: A tuple of rg.Points, each of whose coordinates
is an integer.
What goes out:
AFTER doing the side effect below, this function
returns the rg.Point to which it did the side effect.
If there is no point to which to do the side effect,
returns 'Not found'.
Side effects:
Swaps the x and y coordinates of the first occurrence of an rg.Point
in the given list whose x and y coordinates are both primes.
Has no side effect if there are no such rg.Points
in the given list.
Examples:
If the given tuple is: (rg.Point(5, 12),
rg.Point(20, 20),
rg.Point(1, 13),
rg.Point(10, 40),
rg.Point(13, 5),
rg.Point(10, 3),
rg.Point(3, 7),
rg.Point(2, 2))
then after this function the rg.Point in the given tuple
whose x and y were (13, 5) will have x and y (5, 13)
and the function returns that rg.Point.
Type hints:
:type points: tuple of rg.Point
:rtype: rg.Point | string
"""
###########################################################################
# Done: 4. Implement and test this function.
# The testing code is already written for you (above).
#
# IMPORTANT: This problem is your LOWEST PRIORITY for preparing
# for Test 2. It is a great problem but WAY more subtle
# than anything that you will see on Test 2.
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 9
# TIME ESTIMATE: 15 minutes.
###########################################################################
for k in range(len(points)):
if is_prime(points[k].x)==True and is_prime(points[k].y)==True:
newy=points[k].x
newx=points[k].y
points[k].x=newx
points[k].y=newy
return points[k]
return 'Not found'
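# Added illustrative check (not part of the original exam file): the first
# rg.Point below has two prime coordinates, so its x and y are swapped in place
# and that same (mutated) point is returned.
_demo_points = (rg.Point(13, 5), rg.Point(10, 40))
_demo_swapped = practice_problem4c(_demo_points)
assert (_demo_swapped.x, _demo_swapped.y) == (5, 13)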
def run_test_practice_problem4d():
""" Tests the practice_problem4d function. """
# -------------------------------------------------------------------------
# 5 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem4d((6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5))
# and compare the returned value against 40 (the correct answer).
# -------------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem4d,
[(6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)],
17 + 3 + 7 + 13),
st.SimpleTestCase(practice_problem4d,
[(7, 7, 7, 7, 7, 4, 4, 8, 5, 5, 6)],
0),
st.SimpleTestCase(practice_problem4d,
[(2, 3, 5, 7, 5, 3, 2)],
2 + 3 + 5 + 7 + 5 + 3),
st.SimpleTestCase(practice_problem4d,
[(11, 3, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)],
11 + 3 + 17 + 3 + 7 + 13),
st.SimpleTestCase(practice_problem4d,
[(6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 11, 5)],
17 + 3 + 7 + 13 + 7 + 11),
]
# Run the 5 tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem4d', tests)
def practice_problem4d(sequence):
"""
What comes in: A non-empty sequence of integers.
What goes out: An integer that is the sum of all the items
in the given sequence such that:
-- the item is a prime number, AND
-- the immediate successor of the item
is a DIFFERENT prime number.
Side effects: None.
Examples:
Given sequence (6, 80, 17, 13, 40, 3, 3, 7, 13, 7, 12, 5)
-- this function returns 17 + 3 + 7 + 13, which is 40,
because:
6 (at index 0) is NOT prime - do NOT include 6 in the sum
80 (at index 1) is NOT prime - do NOT include 80 in the sum
17 (at index 2) IS prime AND the next item (13, at index 3)
is a DIFFERENT prime - ** DO ** include 17 in the sum
13 (at index 3) IS prime but the next item (40, at index 4)
is NOT prime - do NOT include 13 in the sum
40 (at index 4) is NOT prime - do NOT include 40 in the sum
3 (at index 5) IS prime AND the next item (3, at index 6)
IS prime but is NOT a DIFFERENT prime -
do NOT include 3 in the sum
3 (at index 6) IS prime AND the next item (7, at index 7)
is a DIFFERENT prime - ** DO ** include 3 in the sum
7 (at index 7) IS prime AND the next item (13, at index 8)
is a DIFFERENT prime - ** DO ** include 7 in the sum
13 (at index 8) IS prime AND the next item (7, at index 9)
is a DIFFERENT prime - ** DO ** include 13 in the sum
7 (at index 9) IS prime but the next item (12, at index 10)
is NOT prime - do NOT include 7 in the sum
12 (at index 10) is NOT prime - do NOT include 12 in the sum
5 (at index 11) IS prime but there is NO item after it
- do NOT include 5 in the sum
Given sequence (7, 7, 7, 7, 7, 4, 4, 8, 5, 5, 6)
-- this function returns 0
Given sequence (2, 3, 5, 7, 5, 3, 2)
-- this function returns 2 + 3 + 5 + 7 + 5 + 3, which is 25
Type hints:
:type sequence: (list | tuple) of int
:rtype: int
"""
###########################################################################
# Done: 5. Implement and test this function.
# The testing code is already written for you (above).
###########################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 15 minutes.
###########################################################################
sum=0
for k in range(len(sequence)-1):
if is_prime(sequence[k])==True and is_prime(sequence[k+1])==True and sequence[k]!=sequence[k+1]:
sum=sum+sequence[k]
return sum
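# Added illustrative check (not part of the original exam file): the third
# docstring example; every prime followed by a different prime contributes,
# so the result is 2 + 3 + 5 + 7 + 5 + 3 = 25.
assert practice_problem4d((2, 3, 5, 7, 5, 3, 2)) == 25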
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 41.281465 | 104 | 0.47306 |
706f8374c59ed2888e924920e187de4a38c4d51b | 1,144 | py | Python | src/richie/apps/social/pipeline/user.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
]
| 174 | 2018-04-14T23:36:01.000Z | 2022-03-10T09:27:01.000Z | src/richie/apps/social/pipeline/user.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
]
| 631 | 2018-04-04T11:28:53.000Z | 2022-03-31T11:18:31.000Z | src/richie/apps/social/pipeline/user.py | leduong/richie | bf7ed379b7e2528cd790dadcec10ac2656efd189 | [
"MIT"
]
| 64 | 2018-06-27T08:35:01.000Z | 2022-03-10T09:27:43.000Z | """Module used by python-social-auth pipeline."""
from social_core.exceptions import AuthAlreadyAssociated, AuthFailed
USER_FIELDS = ["username", "email"]
# pylint: disable=unused-argument,keyword-arg-before-vararg
def get_username(strategy, details, backend, user=None, *args, **kwargs):
"""Check if the username already exists. Raise an exception if yes."""
if "username" not in backend.setting("USER_FIELDS", USER_FIELDS):
return None
storage = strategy.storage
if user:
# The user already exists return its username.
return {"username": storage.user.get_username(user)}
email_as_username = strategy.setting("USERNAME_IS_FULL_EMAIL", False)
if email_as_username and details.get("email"):
username = details["email"]
elif details.get("username"):
username = details["username"]
else:
raise AuthFailed(backend, "Failed to retrieve a valid username.")
if storage.user.user_exists(username=username):
raise AuthAlreadyAssociated(
backend, f"user with username {username} already exists."
)
return {"username": username}
| 33.647059 | 74 | 0.69493 |
e55107a5109ad63bb1e0c49da4342a6f33f656cc | 151 | py | Python | docs/api_overrides/headers.py | JosXa/TgIntegration | ea8faade8a8892650dfd2f67a83b027e4b2e269b | [
"MIT"
]
| 109 | 2018-07-07T12:37:05.000Z | 2022-03-16T19:48:47.000Z | docs/api_overrides/headers.py | JosXa/TgIntegration | ea8faade8a8892650dfd2f67a83b027e4b2e269b | [
"MIT"
]
| 26 | 2018-07-20T18:35:39.000Z | 2021-12-19T23:43:20.000Z | docs/api_overrides/headers.py | JosXa/TgIntegration | ea8faade8a8892650dfd2f67a83b027e4b2e269b | [
"MIT"
]
| 18 | 2018-08-25T10:37:30.000Z | 2022-01-02T00:41:09.000Z | from mkapi.plugins.mkdocs import MkapiPlugin
from mkdocs.config import Config
def on_config_with_mkapi(config: Config, mkapi: MkapiPlugin):
pass
| 21.571429 | 61 | 0.807947 |
c8d147acf9c3510b28c03d113f066b07dd4158f1 | 8,494 | py | Python | sdks/python/apache_beam/io/iobase_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
]
| 1 | 2019-08-02T18:03:15.000Z | 2019-08-02T18:03:15.000Z | sdks/python/apache_beam/io/iobase_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
]
| 80 | 2020-01-16T09:55:09.000Z | 2020-10-03T13:43:07.000Z | sdks/python/apache_beam/io/iobase_test.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
]
| 1 | 2020-04-29T20:09:40.000Z | 2020-04-29T20:09:40.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for classes in iobase.py."""
# pytype: skip-file
from __future__ import absolute_import
import unittest
import mock
import apache_beam as beam
from apache_beam.io.concat_source import ConcatSource
from apache_beam.io.concat_source_test import RangeSource
from apache_beam.io import iobase
from apache_beam.io.iobase import SourceBundle
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class SDFBoundedSourceRestrictionProviderTest(unittest.TestCase):
def setUp(self):
self.initial_range_start = 0
self.initial_range_stop = 4
self.initial_range_source = RangeSource(
self.initial_range_start, self.initial_range_stop)
self.sdf_restriction_provider = (
iobase._SDFBoundedSourceRestrictionProvider(desired_chunk_size=2))
def test_initial_restriction(self):
element = self.initial_range_source
restriction = (self.sdf_restriction_provider.initial_restriction(element))
self.assertTrue(
isinstance(restriction, iobase._SDFBoundedSourceRestriction))
self.assertTrue(isinstance(restriction._source_bundle, SourceBundle))
self.assertEqual(
self.initial_range_start, restriction._source_bundle.start_position)
self.assertEqual(
self.initial_range_stop, restriction._source_bundle.stop_position)
self.assertTrue(isinstance(restriction._source_bundle.source, RangeSource))
self.assertEqual(restriction._range_tracker, None)
def test_create_tracker(self):
expected_start = 1
expected_stop = 3
source_bundle = SourceBundle(
expected_stop - expected_start,
RangeSource(1, 3),
expected_start,
expected_stop)
restriction_tracker = (
self.sdf_restriction_provider.create_tracker(
iobase._SDFBoundedSourceRestriction(source_bundle)))
self.assertTrue(
isinstance(
restriction_tracker, iobase._SDFBoundedSourceRestrictionTracker))
self.assertEqual(expected_start, restriction_tracker.start_pos())
self.assertEqual(expected_stop, restriction_tracker.stop_pos())
def test_simple_source_split(self):
element = self.initial_range_source
restriction = (self.sdf_restriction_provider.initial_restriction(element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
self.sdf_restriction_provider.split(element, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_concat_source_split(self):
element = self.initial_range_source
initial_concat_source = ConcatSource([self.initial_range_source])
sdf_concat_restriction_provider = (
iobase._SDFBoundedSourceRestrictionProvider(desired_chunk_size=2))
restriction = (self.sdf_restriction_provider.initial_restriction(element))
expect_splits = [(0, 2), (2, 4)]
split_bundles = list(
sdf_concat_restriction_provider.split(
initial_concat_source, restriction))
self.assertTrue(
all([
isinstance(bundle._source_bundle, SourceBundle)
for bundle in split_bundles
]))
splits = ([(
bundle._source_bundle.start_position,
bundle._source_bundle.stop_position) for bundle in split_bundles])
self.assertEqual(expect_splits, list(splits))
def test_restriction_size(self):
element = self.initial_range_source
restriction = (self.sdf_restriction_provider.initial_restriction(element))
split_1, split_2 = self.sdf_restriction_provider.split(element, restriction)
split_1_size = self.sdf_restriction_provider.restriction_size(
element, split_1)
split_2_size = self.sdf_restriction_provider.restriction_size(
element, split_2)
self.assertEqual(2, split_1_size)
self.assertEqual(2, split_2_size)
class SDFBoundedSourceRestrictionTrackerTest(unittest.TestCase):
def setUp(self):
self.initial_start_pos = 0
self.initial_stop_pos = 4
source_bundle = SourceBundle(
self.initial_stop_pos - self.initial_start_pos,
RangeSource(self.initial_start_pos, self.initial_stop_pos),
self.initial_start_pos,
self.initial_stop_pos)
self.sdf_restriction_tracker = (
iobase._SDFBoundedSourceRestrictionTracker(
iobase._SDFBoundedSourceRestriction(source_bundle)))
def test_current_restriction_before_split(self):
current_restriction = (self.sdf_restriction_tracker.current_restriction())
self.assertEqual(
self.initial_start_pos,
current_restriction._source_bundle.start_position)
self.assertEqual(
self.initial_stop_pos, current_restriction._source_bundle.stop_position)
self.assertEqual(
self.initial_start_pos,
current_restriction._range_tracker.start_position())
self.assertEqual(
self.initial_stop_pos,
current_restriction._range_tracker.stop_position())
def test_current_restriction_after_split(self):
fraction_of_remainder = 0.5
self.sdf_restriction_tracker.try_claim(1)
expected_restriction, _ = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
current_restriction = self.sdf_restriction_tracker.current_restriction()
self.assertEqual(
expected_restriction._source_bundle, current_restriction._source_bundle)
self.assertTrue(current_restriction._range_tracker)
def test_try_split_at_remainder(self):
fraction_of_remainder = 0.4
expected_primary = (0, 2, 2.0)
expected_residual = (2, 4, 2.0)
self.sdf_restriction_tracker.try_claim(0)
actual_primary, actual_residual = (
self.sdf_restriction_tracker.try_split(fraction_of_remainder))
self.assertEqual(
expected_primary,
(
actual_primary._source_bundle.start_position,
actual_primary._source_bundle.stop_position,
actual_primary._source_bundle.weight))
self.assertEqual(
expected_residual,
(
actual_residual._source_bundle.start_position,
actual_residual._source_bundle.stop_position,
actual_residual._source_bundle.weight))
self.assertEqual(
actual_primary._source_bundle.weight,
self.sdf_restriction_tracker.current_restriction().weight())
class UseSdfBoundedSourcesTests(unittest.TestCase):
def _run_sdf_wrapper_pipeline(self, source, expected_values):
with beam.Pipeline() as p:
experiments = (p._options.view_as(DebugOptions).experiments or [])
# Setup experiment option to enable using SDFBoundedSourceWrapper
if 'beam_fn_api' not in experiments:
# Required so mocking below doesn't mock Create used in assert_that.
experiments.append('beam_fn_api')
p._options.view_as(DebugOptions).experiments = experiments
actual = p | beam.io.Read(source)
assert_that(actual, equal_to(expected_values))
@mock.patch('apache_beam.io.iobase.SDFBoundedSourceReader.expand')
def test_sdf_wrapper_overrides_read(self, sdf_wrapper_mock_expand):
def _fake_wrapper_expand(pbegin):
return pbegin | beam.Map(lambda x: 'fake')
sdf_wrapper_mock_expand.side_effect = _fake_wrapper_expand
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), ['fake'])
def test_sdf_wrap_range_source(self):
self._run_sdf_wrapper_pipeline(RangeSource(0, 4), [0, 1, 2, 3])
if __name__ == '__main__':
unittest.main()
| 39.324074 | 80 | 0.749823 |
a3107c30343304c077ccd961fa8d41050f3685fc | 8,035 | py | Python | env/lib/python3.6/site-packages/django/db/backends/base/introspection.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
]
| 58 | 2018-10-03T19:41:36.000Z | 2022-03-14T21:24:43.000Z | env/lib/python3.6/site-packages/django/db/backends/base/introspection.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
]
| 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | env/lib/python3.6/site-packages/django/db/backends/base/introspection.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
]
| 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | from collections import namedtuple
# Structure returned by DatabaseIntrospection.get_table_list()
TableInfo = namedtuple('TableInfo', ['name', 'type'])
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size precision scale null_ok default')
class BaseDatabaseIntrospection:
"""Encapsulate backend-specific introspection utilities."""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""
Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example.
"""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""
Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def column_name_converter(self, name):
"""
Apply a conversion to the column name for the purposes of comparison.
Use table_name_converter() by default.
"""
return self.table_name_converter(name)
def table_names(self, cursor=None, include_views=False):
"""
Return a list of names of all tables that exist in the database.
Sort the returned table list by Python's default sorting. Do NOT use
the database's ORDER BY here to avoid subtle differences in sorting
order between databases.
"""
def get_names(cursor):
return sorted(ti.name for ti in self.get_table_list(cursor)
if include_views or ti.type == 't')
if cursor is None:
with self.connection.cursor() as cursor:
return get_names(cursor)
return get_names(cursor)
def get_table_list(self, cursor):
"""
Return an unsorted list of TableInfo named tuples of all tables and
views that exist in the database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False, include_views=True):
"""
Return a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, include only the tables in the database.
"""
from django.apps import apps
from django.db import router
tables = set()
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
tables.add(model._meta.db_table)
tables.update(
f.m2m_db_table() for f in model._meta.local_many_to_many
if f.remote_field.through._meta.managed
)
tables = list(tables)
if only_existing:
existing_tables = self.table_names(include_views=include_views)
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"""
Return a set of all models represented by the provided list of table
names.
"""
from django.apps import apps
from django.db import router
all_models = []
for app_config in apps.get_app_configs():
all_models.extend(router.get_migratable_models(app_config, self.connection.alias))
tables = list(map(self.table_name_converter, tables))
return {
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
}
def sequence_list(self):
"""
Return a list of information about all DB sequences for all models in
all apps.
"""
from django.apps import apps
from django.db import router
sequence_list = []
cursor = self.connection.cursor()
for app_config in apps.get_app_configs():
for model in router.get_migratable_models(app_config, self.connection.alias):
if not model._meta.managed:
continue
if model._meta.swapped:
continue
sequence_list.extend(self.get_sequences(cursor, model._meta.db_table, model._meta.local_fields))
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.remote_field.through is None:
sequence = self.get_sequences(cursor, f.m2m_db_table())
sequence_list.extend(sequence if sequence else [{'table': f.m2m_db_table(), 'column': None}])
return sequence_list
def get_sequences(self, cursor, table_name, table_fields=()):
"""
Return a list of introspected sequences for table_name. Each sequence
is a dict: {'table': <table_name>, 'column': <column_name>}. An optional
'name' key can be added if the backend supports named sequences.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_sequences() method')
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of:
(column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Return the name of the primary key column for the given table.
"""
for constraint in self.get_constraints(cursor, table_name).values():
if constraint['primary_key']:
return constraint['columns'][0]
return None
def get_indexes(self, cursor, table_name):
"""
Deprecated in Django 1.11, use get_constraints instead.
Return a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_indexes() method')
def get_constraints(self, cursor, table_name):
"""
Retrieve any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Return a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
* orders: The order (ASC/DESC) defined for the columns of indexes
* type: The type of the index (btree, hash, etc.)
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_constraints() method')
| 41.632124 | 117 | 0.640572 |
9231a9dd42e45467f775342d1226d64a3c6d8495 | 847 | py | Python | 1013.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
]
| null | null | null | 1013.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
]
| null | null | null | 1013.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
]
| null | null | null | """
---> Partition Array Into Three Parts With Equal Sum
---> Easy
"""
class Solution:
def canThreePartsEqualSum(self, arr) -> bool:
sum_of_part = sum(arr) / 3
if sum_of_part != int(sum_of_part) or len(arr) < 3:
return False
curr_sum = 0
no_of_parts = 0
for i in arr:
curr_sum += i
print(curr_sum)
if no_of_parts == 3:
break
if curr_sum == sum_of_part:
curr_sum = 0
no_of_parts += 1
return no_of_parts == 3
in_arr = [3, 3, 6, 5, -2, 2, 5, 1, -9, 4]
a = Solution()
print(a.canThreePartsEqualSum(in_arr))
"""
Approach: each part must sum to sum(arr) / 3. Scan the array keeping a running
sum; every time it reaches that target, count one part and reset the running
sum for the next part. Reaching three counted parts means a valid partition.
"""
| 20.658537 | 93 | 0.497048 |
c9666013be6917ca7f0fb6c40afaddb048da53c1 | 4,724 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_09_01/operations/_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
]
| 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Gets a list of compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.containerservice.v2020_09_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerService/operations'} # type: ignore
| 42.945455 | 133 | 0.6558 |
f8e99bc8b96f285debd4f1ea53b92bc3aea12891 | 1,202 | py | Python | 2019/10 October/dp10202019.py | vishrutkmr7/DailyPracticeProblemsDIP | d1bfbc75f2024736c22c05385f753a90ddcfa0f5 | [
"MIT"
]
| 5 | 2019-08-06T02:34:41.000Z | 2022-01-08T03:03:16.000Z | 2019/10 October/dp10202019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
]
| 15 | 2021-06-01T14:04:16.000Z | 2022-03-08T21:17:22.000Z | 2019/10 October/dp10202019.py | ourangzeb/DailyPracticeProblemsDIP | 66c07af88754e5d59b243e3ee9f02db69f7c0a77 | [
"MIT"
]
| 4 | 2019-09-19T20:00:05.000Z | 2021-08-16T11:31:51.000Z | # This problem was recently asked by Facebook:
# You are given the root of a binary tree. Find the path between 2 nodes that maximizes the sum of all the nodes in the path,
# and return the sum. The path does not necessarily need to go through the root.
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def maxPathSumUtil(root):
# Fill this in.
if root is None:
return 0
l = maxPathSumUtil(root.left)
r = maxPathSumUtil(root.right)
max_single = max(max(l, r) + root.val, root.val)
max_top = max(max_single, l + r + root.val)
maxPathSumUtil.res = max(maxPathSumUtil.res, max_top)
return max_single
def maxPathSum(root):
maxPathSumUtil.res = float("-inf")
maxPathSumUtil(root)
return maxPathSumUtil.res
# (* denotes the max path)
# *10
# / \
# *2 *10
# / \ \
# *20 1 -25
# / \
# 3 4
root = Node(10)
root.left = Node(2)
root.right = Node(10)
root.left.left = Node(20)
root.left.right = Node(1)
root.right.right = Node(-25)
root.right.right.left = Node(3)
root.right.right.right = Node(4)
print(maxPathSum(root))
# 42
| 23.568627 | 125 | 0.623128 |
335005df148e53c295b210b88d1580ab7b74052f | 1,640 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/585479137.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
]
| 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/585479137.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
]
| 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/585479137.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
]
| 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 585479137
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 3, 4, 3)
assert board is not None
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_free_fields(board, 2) == 7
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_busy_fields(board, 3) == 1
assert gamma_move(board, 4, 0, 1) == 1
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_busy_fields(board, 4) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_busy_fields(board, 2) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 4, 2, 0) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 3, 0, 0) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_free_fields(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_busy_fields(board, 2) == 2
gamma_delete(board)
| 28.275862 | 40 | 0.656707 |
50a186c28684d7813f5908b76287234350232bb8 | 2,609 | py | Python | python/mine/mine/app.py | kuwarkapur/Hacktoberfest-2022 | efaafeba5ce51d8d2e2d94c6326cc20bff946f17 | [
"MIT"
]
| 1 | 2021-12-03T09:23:41.000Z | 2021-12-03T09:23:41.000Z | python/mine/mine/app.py | kuwarkapur/Hacktoberfest-2022 | efaafeba5ce51d8d2e2d94c6326cc20bff946f17 | [
"MIT"
]
| null | null | null | python/mine/mine/app.py | kuwarkapur/Hacktoberfest-2022 | efaafeba5ce51d8d2e2d94c6326cc20bff946f17 | [
"MIT"
]
| null | null | null | import pygame
import game_config as gc
import mineswipper as game
from pygame import display, event, image
from time import sleep
import Images
def find_index_from_xy(x, y):
row = y // gc.IMAGE_SIZE
col = x // gc.IMAGE_SIZE
index = row * gc.NUM_TILES_SIDE + col
return row, col, index
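# Added illustration (the concrete numbers are assumptions; the real values live
# in game_config): with IMAGE_SIZE = 80 and NUM_TILES_SIDE = 8, a click at
# (x, y) = (170, 90) maps to row = 90 // 80 = 1, col = 170 // 80 = 2 and
# index = 1 * 8 + 2 = 10.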
pygame.init()
display.set_caption('Mineswipper')
screen = display.set_mode((gc.SCREEN_SIZE, gc.SCREEN_SIZE))
running = True
board = game.BOX(gc.NUM_TILES_SIDE,gc.MINES)
visible_part = board.visible_part()
actual_part = board.actual_board()
# The board is NUM_TILES_SIDE x NUM_TILES_SIDE, so size the tile grid to match.
tiles = [[None for _ in range(gc.NUM_TILES_SIDE)] for _ in range(gc.NUM_TILES_SIDE)]
for i in range(gc.NUM_TILES_SIDE):
for j in range(gc.NUM_TILES_SIDE):
tiles[i][j] = Images.Image(i,j,actual_part[i][j])
def actual_board_display():
screen.blit(image.load('assets/11.png'), (0, 0))
display.flip()
sleep(2.1)
screen.fill((0, 0, 0))
for i in range(gc.NUM_TILES_SIDE):
for j in range(gc.NUM_TILES_SIDE):
tile = tiles[i][j]
screen.blit(tile.image, (tile.col * gc.IMAGE_SIZE + gc.MARGIN, tile.row * gc.IMAGE_SIZE + gc.MARGIN))
display.flip()
sleep(5)
while running:
current_events = event.get()
for e in current_events:
if e.type == pygame.QUIT:
running = False
if e.type == pygame.KEYDOWN:
if e.key == pygame.K_ESCAPE:
running = False
if e.type == pygame.MOUSEBUTTONDOWN:
mouse_x, mouse_y = pygame.mouse.get_pos()
row, col, index = find_index_from_xy(mouse_x, mouse_y)
if row >= gc.NUM_TILES_SIDE or col >= gc.NUM_TILES_SIDE:
continue
if (row,col) not in board.checked:
val = board.dig(row,col)
if val is False:
running = False
print('You Lose!')
actual_board_display()
# Display
screen.fill((0, 0, 0))
for i in range(gc.NUM_TILES_SIDE):
for j in range(gc.NUM_TILES_SIDE):
tile = tiles[i][j]
current_image = tile.image if (i,j) in board.checked else tile.box
screen.blit(current_image, (tile.col * gc.IMAGE_SIZE + gc.MARGIN, tile.row * gc.IMAGE_SIZE + gc.MARGIN))
display.flip()
if len(board.checked) == gc.NUM_TILES_TOTAL - gc.MINES:
running = False
print('You Win!')
screen.blit(image.load('assets/12.png'), (0, 0))
display.flip()
sleep(2.1)
print('Goodbye!')
| 30.337209 | 117 | 0.589115 |
d51e7c5ca01618c4e5a14ffcf615f490ee81370b | 2,420 | py | Python | openstack_dashboard/dashboards/admin/metadata_defs/tabs.py | hashsos/hashcloudos-horizon | 0cc080ca6777e4a1dac5cbcc6143202baddab176 | [
"Apache-2.0"
]
| 930 | 2015-01-04T08:06:03.000Z | 2022-03-13T18:47:13.000Z | openstack_dashboard/dashboards/admin/metadata_defs/tabs.py | hashsos/hashcloudos-horizon | 0cc080ca6777e4a1dac5cbcc6143202baddab176 | [
"Apache-2.0"
]
| 106 | 2019-01-18T03:06:55.000Z | 2019-11-29T05:06:18.000Z | openstack_dashboard/dashboards/admin/metadata_defs/tabs.py | hashsos/hashcloudos-horizon | 0cc080ca6777e4a1dac5cbcc6143202baddab176 | [
"Apache-2.0"
]
| 1,040 | 2015-01-01T18:48:28.000Z | 2022-03-19T08:35:18.000Z | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
class OverviewTab(tabs.Tab):
name = _("Namespace Overview")
slug = "overview"
template_name = constants.METADATA_DETAIL_OVERVIEW_TEMPLATE
def get_context_data(self, request):
namespace_name = self.tab_group.kwargs['namespace_id']
try:
namespace = glance.metadefs_namespace_get(request,
namespace_name,
wrap=True)
except Exception:
namespace = None
msg = _('Unable to retrieve namespace details.')
exceptions.handle(request, msg)
return {"namespace": namespace}
class ContentsTab(tabs.Tab):
name = _("Contents")
slug = "contents"
template_name = constants.METADATA_DETAIL_CONTENTS_TEMPLATE
preload = False
def get_context_data(self, request):
namespace_id = self.tab_group.kwargs['namespace_id']
try:
namespace = glance.metadefs_namespace_get(request,
namespace_id,
wrap=True)
except Exception:
msg = _('Unable to retrieve namespace contents.')
exceptions.handle(request, msg)
return None
return {
'namespace_name': namespace.namespace,
"namespace_contents": namespace.as_json()}
class NamespaceDetailTabs(tabs.DetailTabsGroup):
slug = "namespace_details"
tabs = (OverviewTab, ContentsTab)
| 35.072464 | 78 | 0.63595 |
e201d3ed7e787d9f431f3c50d52fe5a3e3f4a5b8 | 112,425 | py | Python | python/ccxt/async_support/binance.py | zsyh/ccxt | ac0d0ac2a20166b8b5dc0523ed2d1e4e7f505b80 | [
"MIT"
]
| null | null | null | python/ccxt/async_support/binance.py | zsyh/ccxt | ac0d0ac2a20166b8b5dc0523ed2d1e4e7f505b80 | [
"MIT"
]
| null | null | null | python/ccxt/async_support/binance.py | zsyh/ccxt | ac0d0ac2a20166b8b5dc0523ed2d1e4e7f505b80 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
class binance(Exchange):
def describe(self):
return self.deep_extend(super(binance, self).describe(), {
'id': 'binance',
'name': 'Binance',
'countries': ['JP', 'MT'], # Japan, Malta
'rateLimit': 500,
'certified': True,
'pro': True,
# new metainfo interface
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchBidsAsks': True,
'fetchClosedOrders': 'emulated',
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingFees': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOrderBook': True,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'fetchTransactions': False,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/29604020-d5483cdc-87ee-11e7-94c7-d1a8d9169293.jpg',
'test': {
'dapiPublic': 'https://testnet.binancefuture.com/dapi/v1',
'dapiPrivate': 'https://testnet.binancefuture.com/dapi/v1',
'fapiPublic': 'https://testnet.binancefuture.com/fapi/v1',
'fapiPrivate': 'https://testnet.binancefuture.com/fapi/v1',
'fapiPrivateV2': 'https://testnet.binancefuture.com/fapi/v2',
'public': 'https://testnet.binance.vision/api/v3',
'private': 'https://testnet.binance.vision/api/v3',
'v3': 'https://testnet.binance.vision/api/v3',
'v1': 'https://testnet.binance.vision/api/v1',
},
'api': {
'wapi': 'https://api.binance.com/wapi/v3',
'sapi': 'https://api.binance.com/sapi/v1',
'dapiPublic': 'https://dapi.binance.com/dapi/v1',
'dapiPrivate': 'https://dapi.binance.com/dapi/v1',
'dapiData': 'https://dapi.binance.com/futures/data',
'fapiPublic': 'https://fapi.binance.com/fapi/v1',
'fapiPrivate': 'https://fapi.binance.com/fapi/v1',
'fapiData': 'https://fapi.binance.com/futures/data',
'fapiPrivateV2': 'https://fapi.binance.com/fapi/v2',
'public': 'https://api.binance.com/api/v3',
'private': 'https://api.binance.com/api/v3',
'v3': 'https://api.binance.com/api/v3',
'v1': 'https://api.binance.com/api/v1',
},
'www': 'https://www.binance.com',
'referral': 'https://www.binance.com/?ref=10205187',
'doc': [
'https://binance-docs.github.io/apidocs/spot/en',
],
'api_management': 'https://www.binance.com/en/usercenter/settings/api-management',
'fees': 'https://www.binance.com/en/fee/schedule',
},
'api': {
# the API structure below will need 3-layer apidefs
'sapi': {
'get': [
'accountSnapshot',
# these endpoints require self.apiKey
'margin/asset',
'margin/pair',
'margin/allAssets',
'margin/allPairs',
'margin/priceIndex',
# these endpoints require self.apiKey + self.secret
'asset/assetDividend',
'margin/loan',
'margin/repay',
'margin/account',
'margin/transfer',
'margin/interestHistory',
'margin/forceLiquidationRec',
'margin/order',
'margin/openOrders',
'margin/allOrders',
'margin/myTrades',
'margin/maxBorrowable',
'margin/maxTransferable',
'margin/isolated/transfer',
'margin/isolated/account',
'margin/isolated/pair',
'margin/isolated/allPairs',
'futures/transfer',
'futures/loan/borrow/history',
'futures/loan/repay/history',
'futures/loan/wallet',
'futures/loan/configs',
'futures/loan/calcAdjustLevel',
'futures/loan/calcMaxAdjustAmount',
'futures/loan/adjustCollateral/history',
'futures/loan/liquidationHistory',
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
'capital/config/getall', # get networks for withdrawing USDT ERC20 vs USDT Omni
'capital/deposit/address',
'capital/deposit/hisrec',
'capital/deposit/subAddress',
'capital/deposit/subHisrec',
'capital/withdraw/history',
'sub-account/futures/account',
'sub-account/futures/accountSummary',
'sub-account/futures/positionRisk',
'sub-account/futures/internalTransfer',
'sub-account/margin/account',
'sub-account/margin/accountSummary',
'sub-account/spotSummary',
'sub-account/status',
'sub-account/transfer/subUserHistory',
'sub-account/universalTransfer',
# lending endpoints
'lending/daily/product/list',
'lending/daily/userLeftQuota',
'lending/daily/userRedemptionQuota',
'lending/daily/token/position',
'lending/union/account',
'lending/union/purchaseRecord',
'lending/union/redemptionRecord',
'lending/union/interestHistory',
'lending/project/list',
'lending/project/position/list',
# mining endpoints
'mining/pub/algoList',
'mining/pub/coinList',
'mining/worker/detail',
'mining/worker/list',
'mining/payment/list',
'mining/statistics/user/status',
'mining/statistics/user/list',
# liquid swap endpoints
'bswap/pools',
'bswap/liquidity',
'bswap/liquidityOps',
'bswap/quote',
'bswap/swap',
# leveraged token endpoints
'blvt/tokenInfo',
'blvt/subscribe/record',
'blvt/redeem/record',
'blvt/userLimit',
],
'post': [
'asset/dust',
'account/disableFastWithdrawSwitch',
'account/enableFastWithdrawSwitch',
'capital/withdraw/apply',
'margin/transfer',
'margin/loan',
'margin/repay',
'margin/order',
'margin/isolated/create',
'margin/isolated/transfer',
'sub-account/margin/transfer',
'sub-account/margin/enable',
'sub-account/futures/enable',
'sub-account/futures/transfer',
'sub-account/futures/internalTransfer',
'sub-account/transfer/subToSub',
'sub-account/transfer/subToMaster',
'sub-account/universalTransfer',
'userDataStream',
'userDataStream/isolated',
'futures/transfer',
'futures/loan/borrow',
'futures/loan/repay',
'futures/loan/adjustCollateral',
# lending
'lending/customizedFixed/purchase',
'lending/daily/purchase',
'lending/daily/redeem',
# liquid swap endpoints
'bswap/liquidityAdd',
'bswap/liquidityRemove',
'bswap/swap',
# leveraged token endpoints
'blvt/subscribe',
'blvt/redeem',
],
'put': [
'userDataStream',
'userDataStream/isolated',
],
'delete': [
'margin/order',
'userDataStream',
'userDataStream/isolated',
],
},
'wapi': {
'post': [
'withdraw',
'sub-account/transfer',
],
'get': [
'depositHistory',
'withdrawHistory',
'depositAddress',
'accountStatus',
'systemStatus',
'apiTradingStatus',
'userAssetDribbletLog',
'tradeFee',
'assetDetail',
'sub-account/list',
'sub-account/transfer/history',
'sub-account/assets',
],
},
'dapiPublic': {
'get': [
'ping',
'time',
'exchangeInfo',
'depth',
'trades',
'historicalTrades',
'aggTrades',
'premiumIndex',
'fundingRate',
'klines',
'continuousKlines',
'indexPriceKlines',
'markPriceKlines',
'ticker/24hr',
'ticker/price',
'ticker/bookTicker',
'allForceOrders',
'openInterest',
],
},
'dapiData': {
'get': [
'openInterestHist',
'topLongShortAccountRatio',
'topLongShortPositionRatio',
'globalLongShortAccountRatio',
'takerBuySellVol',
'basis',
],
},
'dapiPrivate': {
'get': [
'positionSide/dual',
'order',
'openOrder',
'openOrders',
'allOrders',
'balance',
'account',
'positionMargin/history',
'positionRisk',
'userTrades',
'income',
'leverageBracket',
'forceOrders',
'adlQuantile',
],
'post': [
'positionSide/dual',
'order',
'batchOrders',
'countdownCancelAll',
'leverage',
'marginType',
'positionMargin',
'listenKey',
],
'put': [
'listenKey',
],
'delete': [
'order',
'allOpenOrders',
'batchOrders',
'listenKey',
],
},
'fapiPublic': {
'get': [
'ping',
'time',
'exchangeInfo',
'depth',
'trades',
'historicalTrades',
'aggTrades',
'klines',
'fundingRate',
'premiumIndex',
'ticker/24hr',
'ticker/price',
'ticker/bookTicker',
'allForceOrders',
'openInterest',
'indexInfo',
],
},
'fapiData': {
'get': [
'openInterestHist',
'topLongShortAccountRatio',
'topLongShortPositionRatio',
'globalLongShortAccountRatio',
'takerlongshortRatio',
],
},
'fapiPrivate': {
'get': [
'allForceOrders',
'allOrders',
'openOrder',
'openOrders',
'order',
'account',
'balance',
'leverageBracket',
'positionMargin/history',
'positionRisk',
'positionSide/dual',
'userTrades',
'income',
],
'post': [
'batchOrders',
'positionSide/dual',
'positionMargin',
'marginType',
'order',
'leverage',
'listenKey',
'countdownCancelAll',
],
'put': [
'listenKey',
],
'delete': [
'batchOrders',
'order',
'allOpenOrders',
'listenKey',
],
},
'fapiPrivateV2': {
'get': [
'account',
'balance',
'positionRisk',
],
},
'v3': {
'get': [
'ticker/price',
'ticker/bookTicker',
],
},
'public': {
'get': [
'ping',
'time',
'depth',
'trades',
'aggTrades',
'historicalTrades',
'klines',
'ticker/24hr',
'ticker/price',
'ticker/bookTicker',
'exchangeInfo',
],
'put': ['userDataStream'],
'post': ['userDataStream'],
'delete': ['userDataStream'],
},
'private': {
'get': [
'allOrderList', # oco
'openOrderList', # oco
'orderList', # oco
'order',
'openOrders',
'allOrders',
'account',
'myTrades',
],
'post': [
'order/oco',
'order',
'order/test',
],
'delete': [
'openOrders', # added on 2020-04-25 for canceling all open orders per symbol
'orderList', # oco
'order',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.001,
'maker': 0.001,
},
},
'commonCurrencies': {
'BCC': 'BCC', # kept for backward-compatibility https://github.com/ccxt/ccxt/issues/4848
'YOYO': 'YOYOW',
},
# exchange-specific options
'options': {
# 'fetchTradesMethod': 'publicGetAggTrades', # publicGetTrades, publicGetHistoricalTrades
'defaultTimeInForce': 'GTC', # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
'defaultType': 'spot', # 'spot', 'future', 'margin', 'delivery'
'hasAlreadyAuthenticatedSuccessfully': False,
'warnOnFetchOpenOrdersWithoutSymbol': True,
'recvWindow': 5 * 1000, # 5 sec, binance default
'timeDifference': 0, # the difference between system clock and Binance clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
'parseOrderToPrecision': False, # force amounts and costs in parseOrder to precision
'newOrderRespType': {
'market': 'FULL', # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
'limit': 'RESULT', # we change it from 'ACK' by default to 'RESULT'
},
'quoteOrderQty': True, # whether market orders support amounts in quote currency
},
# https://binance-docs.github.io/apidocs/spot/en/#error-codes-2
'exceptions': {
                'You are not authorized to execute this request.': PermissionDenied, # {"msg":"You are not authorized to execute this request."}
'API key does not exist': AuthenticationError,
'Order would trigger immediately.': OrderImmediatelyFillable,
'Stop price would trigger immediately.': OrderImmediatelyFillable, # {"code":-2010,"msg":"Stop price would trigger immediately."}
'Order would immediately match and take.': OrderImmediatelyFillable, # {"code":-2010,"msg":"Order would immediately match and take."}
'Account has insufficient balance for requested action.': InsufficientFunds,
'Rest API trading is not enabled.': ExchangeNotAvailable,
"You don't have permission.": PermissionDenied, # {"msg":"You don't have permission.","success":false}
'Market is closed.': ExchangeNotAvailable, # {"code":-1013,"msg":"Market is closed."}
'Too many requests.': DDoSProtection, # {"msg":"Too many requests. Please try again later.","success":false}
'-1000': ExchangeNotAvailable, # {"code":-1000,"msg":"An unknown error occured while processing the request."}
'-1001': ExchangeNotAvailable, # 'Internal error; unable to process your request. Please try again.'
                '-1002': AuthenticationError, # 'You are not authorized to execute this request.'
'-1003': RateLimitExceeded, # {"code":-1003,"msg":"Too much request weight used, current limit is 1200 request weight per 1 MINUTE. Please use the websocket for live updates to avoid polling the API."}
'-1013': InvalidOrder, # createOrder -> 'invalid quantity'/'invalid price'/MIN_NOTIONAL
'-1015': RateLimitExceeded, # 'Too many new orders; current limit is %s orders per %s.'
'-1016': ExchangeNotAvailable, # 'This service is no longer available.',
'-1020': BadRequest, # 'This operation is not supported.'
'-1021': InvalidNonce, # 'your time is ahead of server'
                '-1022': AuthenticationError, # {"code":-1022,"msg":"Signature for this request is not valid."}
'-1100': BadRequest, # createOrder(symbol, 1, asdf) -> 'Illegal characters found in parameter 'price'
'-1101': BadRequest, # Too many parameters; expected %s and received %s.
'-1102': BadRequest, # Param %s or %s must be sent, but both were empty
'-1103': BadRequest, # An unknown parameter was sent.
'-1104': BadRequest, # Not all sent parameters were read, read 8 parameters but was sent 9
'-1105': BadRequest, # Parameter %s was empty.
'-1106': BadRequest, # Parameter %s sent when not required.
                '-1111': BadRequest, # Precision is over the maximum defined for this asset.
'-1112': InvalidOrder, # No orders on book for symbol.
'-1114': BadRequest, # TimeInForce parameter sent when not required.
'-1115': BadRequest, # Invalid timeInForce.
'-1116': BadRequest, # Invalid orderType.
'-1117': BadRequest, # Invalid side.
'-1118': BadRequest, # New client order ID was empty.
'-1119': BadRequest, # Original client order ID was empty.
'-1120': BadRequest, # Invalid interval.
'-1121': BadSymbol, # Invalid symbol.
'-1125': AuthenticationError, # This listenKey does not exist.
'-1127': BadRequest, # More than %s hours between startTime and endTime.
'-1128': BadRequest, # {"code":-1128,"msg":"Combination of optional parameters invalid."}
'-1130': BadRequest, # Data sent for paramter %s is not valid.
'-1131': BadRequest, # recvWindow must be less than 60000
'-2010': ExchangeError, # generic error code for createOrder -> 'Account has insufficient balance for requested action.', {"code":-2010,"msg":"Rest API trading is not enabled."}, etc...
'-2011': OrderNotFound, # cancelOrder(1, 'BTC/USDT') -> 'UNKNOWN_ORDER'
'-2013': OrderNotFound, # fetchOrder(1, 'BTC/USDT') -> 'Order does not exist'
'-2014': AuthenticationError, # {"code":-2014, "msg": "API-key format invalid."}
'-2015': AuthenticationError, # "Invalid API-key, IP, or permissions for action."
'-3005': InsufficientFunds, # {"code":-3005,"msg":"Transferring out not allowed. Transfer out amount exceeds max amount."}
'-3008': InsufficientFunds, # {"code":-3008,"msg":"Borrow not allowed. Your borrow amount has exceed maximum borrow amount."}
'-3010': ExchangeError, # {"code":-3010,"msg":"Repay not allowed. Repay amount exceeds borrow amount."}
'-3022': AccountSuspended, # You account's trading is banned.
},
})
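    # A minimal instantiation sketch (illustrative consumer code, not part of
    # this class): credentials and the 'options' documented above are normally
    # supplied to the constructor and can be overridden per call via params.
    #
    #     import ccxt.async_support as ccxt
    #
    #     exchange = ccxt.binance({
    #         'apiKey': 'YOUR_API_KEY',      # placeholder
    #         'secret': 'YOUR_SECRET',       # placeholder
    #         'options': {
    #             'defaultType': 'future',   # route unified calls to the fapi endpoints
    #             'adjustForTimeDifference': True,
    #         },
    #     })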
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
async def fetch_time(self, params={}):
type = self.safe_string_2(self.options, 'fetchTime', 'defaultType', 'spot')
method = 'publicGetTime'
if type == 'future':
method = 'fapiPublicGetTime'
elif type == 'delivery':
method = 'dapiPublicGetTime'
response = await getattr(self, method)(params)
return self.safe_integer(response, 'serverTime')
async def load_time_difference(self, params={}):
serverTime = await self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
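    # nonce() subtracts options['timeDifference'] (local clock minus server clock,
    # as measured by load_time_difference above) so that signed requests stay
    # within the recvWindow even when the local clock runs ahead of Binance.
    # A hedged usage sketch, assuming 'exchange' is an instance of this class:
    #
    #     await exchange.load_time_difference()  # refresh the offset manually
    #     # or set exchange.options['adjustForTimeDifference'] = True so that
    #     # fetch_markets() refreshes it automatically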
async def fetch_markets(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
if (type != 'spot') and (type != 'future') and (type != 'margin') and (type != 'delivery'):
raise ExchangeError(self.id + " does not support '" + type + "' type, set exchange.options['defaultType'] to 'spot', 'margin', 'delivery' or 'future'") # eslint-disable-line quotes
method = 'publicGetExchangeInfo'
if type == 'future':
method = 'fapiPublicGetExchangeInfo'
elif type == 'delivery':
method = 'dapiPublicGetExchangeInfo'
response = await getattr(self, method)(query)
#
# spot / margin
#
# {
# "timezone":"UTC",
# "serverTime":1575416692969,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"SECOND","intervalNum":10,"limit":100},
# {"rateLimitType":"ORDERS","interval":"DAY","intervalNum":1,"limit":200000}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"ETHBTC",
# "status":"TRADING",
# "baseAsset":"ETH",
# "baseAssetPrecision":8,
# "quoteAsset":"BTC",
# "quotePrecision":8,
# "baseCommissionPrecision":8,
# "quoteCommissionPrecision":8,
# "orderTypes":["LIMIT","LIMIT_MAKER","MARKET","STOP_LOSS_LIMIT","TAKE_PROFIT_LIMIT"],
# "icebergAllowed":true,
# "ocoAllowed":true,
# "quoteOrderQtyMarketAllowed":true,
# "isSpotTradingAllowed":true,
# "isMarginTradingAllowed":true,
# "filters":[
# {"filterType":"PRICE_FILTER","minPrice":"0.00000100","maxPrice":"100000.00000000","tickSize":"0.00000100"},
# {"filterType":"PERCENT_PRICE","multiplierUp":"5","multiplierDown":"0.2","avgPriceMins":5},
# {"filterType":"LOT_SIZE","minQty":"0.00100000","maxQty":"100000.00000000","stepSize":"0.00100000"},
# {"filterType":"MIN_NOTIONAL","minNotional":"0.00010000","applyToMarket":true,"avgPriceMins":5},
# {"filterType":"ICEBERG_PARTS","limit":10},
# {"filterType":"MARKET_LOT_SIZE","minQty":"0.00000000","maxQty":"63100.00000000","stepSize":"0.00000000"},
# {"filterType":"MAX_NUM_ALGO_ORDERS","maxNumAlgoOrders":5}
# ]
# },
# ],
# }
#
# futures/usdt-margined(fapi)
#
# {
# "timezone":"UTC",
# "serverTime":1575417244353,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"MINUTE","intervalNum":1,"limit":1200}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"BTCUSDT",
# "status":"TRADING",
# "maintMarginPercent":"2.5000",
# "requiredMarginPercent":"5.0000",
# "baseAsset":"BTC",
# "quoteAsset":"USDT",
# "pricePrecision":2,
# "quantityPrecision":3,
# "baseAssetPrecision":8,
# "quotePrecision":8,
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.01"},
# {"stepSize":"0.001","filterType":"LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"stepSize":"0.001","filterType":"MARKET_LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.8500","multiplierUp":"1.1500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes":["LIMIT","MARKET","STOP"],
# "timeInForce":["GTC","IOC","FOK","GTX"]
# }
# ]
# }
#
# delivery/coin-margined(dapi)
#
# {
# "timezone": "UTC",
# "serverTime": 1597667052958,
# "rateLimits": [
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":6000},
# {"rateLimitType":"ORDERS","interval":"MINUTE","intervalNum":1,"limit":6000}
# ],
# "exchangeFilters": [],
# "symbols": [
# {
# "symbol": "BTCUSD_200925",
# "pair": "BTCUSD",
# "contractType": "CURRENT_QUARTER",
# "deliveryDate": 1601020800000,
# "onboardDate": 1590739200000,
# "contractStatus": "TRADING",
# "contractSize": 100,
# "marginAsset": "BTC",
# "maintMarginPercent": "2.5000",
# "requiredMarginPercent": "5.0000",
# "baseAsset": "BTC",
# "quoteAsset": "USD",
# "pricePrecision": 1,
# "quantityPrecision": 0,
# "baseAssetPrecision": 8,
# "quotePrecision": 8,
# "equalQtyPrecision": 4,
# "filters": [
# {"minPrice":"0.1","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.1"},
# {"stepSize":"1","filterType":"LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"stepSize":"0","filterType":"MARKET_LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.9500","multiplierUp":"1.0500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes": ["LIMIT","MARKET","STOP","STOP_MARKET","TAKE_PROFIT","TAKE_PROFIT_MARKET","TRAILING_STOP_MARKET"],
# "timeInForce": ["GTC","IOC","FOK","GTX"]
# },
# {
# "symbol": "BTCUSD_PERP",
# "pair": "BTCUSD",
# "contractType": "PERPETUAL",
# "deliveryDate": 4133404800000,
# "onboardDate": 1596006000000,
# "contractStatus": "TRADING",
# "contractSize": 100,
# "marginAsset": "BTC",
# "maintMarginPercent": "2.5000",
# "requiredMarginPercent": "5.0000",
# "baseAsset": "BTC",
# "quoteAsset": "USD",
# "pricePrecision": 1,
# "quantityPrecision": 0,
# "baseAssetPrecision": 8,
# "quotePrecision": 8,
# "equalQtyPrecision": 4,
# "filters": [
# {"minPrice":"0.1","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.1"},
# {"stepSize":"1","filterType":"LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"stepSize":"1","filterType":"MARKET_LOT_SIZE","maxQty":"100000","minQty":"1"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.8500","multiplierUp":"1.1500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes": ["LIMIT","MARKET","STOP","STOP_MARKET","TAKE_PROFIT","TAKE_PROFIT_MARKET","TRAILING_STOP_MARKET"],
# "timeInForce": ["GTC","IOC","FOK","GTX"]
# }
# ]
# }
#
if self.options['adjustForTimeDifference']:
await self.load_time_difference()
markets = self.safe_value(response, 'symbols')
result = []
for i in range(0, len(markets)):
market = markets[i]
spot = (type == 'spot')
future = (type == 'future')
delivery = (type == 'delivery')
id = self.safe_string(market, 'symbol')
lowercaseId = self.safe_string_lower(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
parts = id.split('_')
lastPart = self.safe_string(parts, 1)
idSymbol = (delivery) and (lastPart != 'PERP')
symbol = id if idSymbol else (base + '/' + quote)
filters = self.safe_value(market, 'filters', [])
filtersByType = self.index_by(filters, 'filterType')
precision = {
'base': self.safe_integer(market, 'baseAssetPrecision'),
'quote': self.safe_integer(market, 'quotePrecision'),
'amount': self.safe_integer(market, 'baseAssetPrecision'),
'price': self.safe_integer(market, 'quotePrecision'),
}
status = self.safe_string_2(market, 'status', 'contractStatus')
active = (status == 'TRADING')
margin = self.safe_value(market, 'isMarginTradingAllowed', future or delivery)
entry = {
'id': id,
'lowercaseId': lowercaseId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': type,
'spot': spot,
'margin': margin,
'future': future,
'delivery': delivery,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
}
if 'PRICE_FILTER' in filtersByType:
filter = self.safe_value(filtersByType, 'PRICE_FILTER', {})
# PRICE_FILTER reports zero values for maxPrice
# since they updated filter types in November 2018
# https://github.com/ccxt/ccxt/issues/4286
# therefore limits['price']['max'] doesn't have any meaningful value except None
entry['limits']['price'] = {
'min': self.safe_float(filter, 'minPrice'),
'max': None,
}
maxPrice = self.safe_float(filter, 'maxPrice')
if (maxPrice is not None) and (maxPrice > 0):
entry['limits']['price']['max'] = maxPrice
entry['precision']['price'] = self.precision_from_string(filter['tickSize'])
if 'LOT_SIZE' in filtersByType:
filter = self.safe_value(filtersByType, 'LOT_SIZE', {})
stepSize = self.safe_string(filter, 'stepSize')
entry['precision']['amount'] = self.precision_from_string(stepSize)
entry['limits']['amount'] = {
'min': self.safe_float(filter, 'minQty'),
'max': self.safe_float(filter, 'maxQty'),
}
if 'MARKET_LOT_SIZE' in filtersByType:
filter = self.safe_value(filtersByType, 'MARKET_LOT_SIZE', {})
entry['limits']['market'] = {
'min': self.safe_float(filter, 'minQty'),
'max': self.safe_float(filter, 'maxQty'),
}
if 'MIN_NOTIONAL' in filtersByType:
filter = self.safe_value(filtersByType, 'MIN_NOTIONAL', {})
entry['limits']['cost']['min'] = self.safe_float(filter, 'minNotional')
result.append(entry)
return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = amount * rate
precision = market['precision']['price']
if side == 'sell':
cost *= price
else:
key = 'base'
precision = market['precision']['amount']
cost = self.decimal_to_precision(cost, ROUND, precision, self.precisionMode)
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(cost),
}
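    # A worked example of the fee arithmetic above, with illustrative numbers:
    # at the default taker rate of 0.001 on a hypothetical BTC/USDT market,
    # buying 0.5 BTC charges 0.5 * 0.001 = 0.0005 in the base currency (BTC),
    # while selling 0.5 BTC at a price of 20000 charges 0.5 * 0.001 * 20000 = 10.0
    # in the quote currency (USDT), before rounding to the market's precision.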
async def fetch_balance(self, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetAccount'
if type == 'future':
options = self.safe_value(self.options, 'future', {})
fetchBalanceOptions = self.safe_value(options, 'fetchBalance', {})
method = self.safe_string(fetchBalanceOptions, 'method', 'fapiPrivateV2GetAccount')
elif type == 'delivery':
options = self.safe_value(self.options, 'delivery', {})
fetchBalanceOptions = self.safe_value(options, 'fetchBalance', {})
method = self.safe_string(fetchBalanceOptions, 'method', 'dapiPrivateGetAccount')
elif type == 'margin':
method = 'sapiGetMarginAccount'
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
#
# spot
#
# {
# makerCommission: 10,
# takerCommission: 10,
# buyerCommission: 0,
# sellerCommission: 0,
# canTrade: True,
# canWithdraw: True,
# canDeposit: True,
# updateTime: 1575357359602,
# accountType: "MARGIN",
# balances: [
# {asset: "BTC", free: "0.00219821", locked: "0.00000000" },
# ]
# }
#
# margin
#
# {
# "borrowEnabled":true,
# "marginLevel":"999.00000000",
# "totalAssetOfBtc":"0.00000000",
# "totalLiabilityOfBtc":"0.00000000",
# "totalNetAssetOfBtc":"0.00000000",
# "tradeEnabled":true,
# "transferEnabled":true,
# "userAssets":[
# {"asset":"MATIC","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"},
# {"asset":"VET","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"},
# {"asset":"USDT","borrowed":"0.00000000","free":"0.00000000","interest":"0.00000000","locked":"0.00000000","netAsset":"0.00000000"}
# ],
# }
#
# futures(fapi)
#
# fapiPrivateGetAccount
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "totalInitialMargin":"0.00000000",
# "totalMaintMargin":"0.00000000",
# "totalWalletBalance":"4.54000000",
# "totalUnrealizedProfit":"0.00000000",
# "totalMarginBalance":"4.54000000",
# "totalPositionInitialMargin":"0.00000000",
# "totalOpenOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000",
# "assets":[
# {
# "asset":"USDT",
# "walletBalance":"4.54000000",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"4.54000000",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSDT",
# "initialMargin":"0.00000",
# "maintMargin":"0.00000",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0.00000",
# "openOrderInitialMargin":"0.00000"
# }
# ]
# }
#
# fapiPrivateV2GetAccount
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "totalInitialMargin":"0.00000000",
# "totalMaintMargin":"0.00000000",
# "totalWalletBalance":"0.00000000",
# "totalUnrealizedProfit":"0.00000000",
# "totalMarginBalance":"0.00000000",
# "totalPositionInitialMargin":"0.00000000",
# "totalOpenOrderInitialMargin":"0.00000000",
# "totalCrossWalletBalance":"0.00000000",
# "totalCrossUnPnl":"0.00000000",
# "availableBalance":"0.00000000",
# "maxWithdrawAmount":"0.00000000",
# "assets":[
# {
# "asset":"BNB",
# "walletBalance":"0.01000000",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"0.01000000",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"0.01000000",
# "crossWalletBalance":"0.01000000",
# "crossUnPnl":"0.00000000",
# "availableBalance":"0.01000000"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSDT",
# "initialMargin":"0",
# "maintMargin":"0",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0",
# "openOrderInitialMargin":"0",
# "leverage":"20",
# "isolated":false,
# "entryPrice":"0.00000",
# "maxNotional":"5000000",
# "positionSide":"BOTH"
# },
# ]
# }
#
# fapiPrivateV2GetBalance
#
# [
# {
# "accountAlias":"FzFzXquXXqoC",
# "asset":"BNB",
# "balance":"0.01000000",
# "crossWalletBalance":"0.01000000",
# "crossUnPnl":"0.00000000",
# "availableBalance":"0.01000000",
# "maxWithdrawAmount":"0.01000000"
# }
# ]
#
result = {'info': response}
if (type == 'spot') or (type == 'margin'):
balances = self.safe_value_2(response, 'balances', 'userAssets', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'free')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
else:
balances = response
if not isinstance(response, list):
balances = self.safe_value(response, 'assets', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'availableBalance')
account['used'] = self.safe_float(balance, 'initialMargin')
account['total'] = self.safe_float_2(balance, 'marginBalance', 'balance')
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 100, max 5000, see https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
method = 'publicGetDepth'
if market['future']:
method = 'fapiPublicGetDepth'
elif market['delivery']:
method = 'dapiPublicGetDepth'
response = await getattr(self, method)(self.extend(request, params))
orderbook = self.parse_order_book(response)
orderbook['nonce'] = self.safe_integer(response, 'lastUpdateId')
return orderbook
def parse_ticker(self, ticker, market=None):
#
# {
# symbol: 'ETHBTC',
# priceChange: '0.00068700',
# priceChangePercent: '2.075',
# weightedAvgPrice: '0.03342681',
# prevClosePrice: '0.03310300',
# lastPrice: '0.03378900',
# lastQty: '0.07700000',
# bidPrice: '0.03378900',
# bidQty: '7.16800000',
# askPrice: '0.03379000',
# askQty: '24.00000000',
# openPrice: '0.03310200',
# highPrice: '0.03388900',
# lowPrice: '0.03306900',
# volume: '205478.41000000',
# quoteVolume: '6868.48826294',
# openTime: 1601469986932,
# closeTime: 1601556386932,
# firstId: 196098772,
# lastId: 196186315,
# count: 87544
# }
#
timestamp = self.safe_integer(ticker, 'closeTime')
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_float(ticker, 'lastPrice')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': self.safe_float(ticker, 'weightedAvgPrice'),
'open': self.safe_float(ticker, 'openPrice'),
'close': last,
'last': last,
'previousClose': self.safe_float(ticker, 'prevClosePrice'), # previous day close
'change': self.safe_float(ticker, 'priceChange'),
'percentage': self.safe_float(ticker, 'priceChangePercent'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
async def fetch_status(self, params={}):
response = await self.wapiGetSystemStatus(params)
status = self.safe_value(response, 'status')
if status is not None:
status = 'ok' if (status == 0) else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicGetTicker24hr'
if market['future']:
method = 'fapiPublicGetTicker24hr'
elif market['delivery']:
method = 'dapiPublicGetTicker24hr'
response = await getattr(self, method)(self.extend(request, params))
if isinstance(response, list):
firstTicker = self.safe_value(response, 0, {})
return self.parse_ticker(firstTicker, market)
return self.parse_ticker(response, market)
def parse_tickers(self, rawTickers, symbols=None):
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
return self.filter_by_array(tickers, 'symbol', symbols)
async def fetch_bids_asks(self, symbols=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBidsAsks', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = None
if type == 'future':
method = 'fapiPublicGetTickerBookTicker'
elif type == 'delivery':
method = 'dapiPublicGetTickerBookTicker'
else:
method = 'publicGetTickerBookTicker'
response = await getattr(self, method)(query)
return self.parse_tickers(response, symbols)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
defaultMethod = None
if type == 'future':
defaultMethod = 'fapiPublicGetTicker24hr'
elif type == 'delivery':
defaultMethod = 'dapiPublicGetTicker24hr'
else:
defaultMethod = 'publicGetTicker24hr'
method = self.safe_string(self.options, 'fetchTickersMethod', defaultMethod)
response = await getattr(self, method)(query)
return self.parse_tickers(response, symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1591478520000,
# "0.02501300",
# "0.02501800",
# "0.02500000",
# "0.02500000",
# "22.19000000",
# 1591478579999,
# "0.55490906",
# 40,
# "10.92900000",
# "0.27336462",
# "0"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default == max == 500
method = 'publicGetKlines'
if market['future']:
method = 'fapiPublicGetKlines'
elif market['delivery']:
method = 'dapiPublicGetKlines'
response = await getattr(self, method)(self.extend(request, params))
#
# [
# [1591478520000,"0.02501300","0.02501800","0.02500000","0.02500000","22.19000000",1591478579999,"0.55490906",40,"10.92900000","0.27336462","0"],
# [1591478580000,"0.02499600","0.02500900","0.02499400","0.02500300","21.34700000",1591478639999,"0.53370468",24,"7.53800000","0.18850725","0"],
# [1591478640000,"0.02500800","0.02501100","0.02500300","0.02500800","154.14200000",1591478699999,"3.85405839",97,"5.32300000","0.13312641","0"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
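    # A hedged usage sketch (illustrative only, assuming the '1h' timeframe key
    # is configured in self.timeframes): each row returned by fetch_ohlcv is the
    # first six fields of the kline above, i.e. [timestamp, open, high, low, close, volume].
    #
    #     candles = await exchange.fetch_ohlcv('BTC/USDT', '1h', since=1591478520000, limit=500)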
def parse_trade(self, trade, market=None):
if 'isDustTrade' in trade:
return self.parse_dust_trade(trade, market)
#
# aggregate trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
#
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
#
# recent public trades and old public trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#old-trade-lookup-market_data
#
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
#
# private trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data
#
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
#
# futures trades
# https://binance-docs.github.io/apidocs/futures/en/#account-trade-list-user_data
#
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
#
timestamp = self.safe_integer_2(trade, 'T', 'time')
price = self.safe_float_2(trade, 'p', 'price')
amount = self.safe_float_2(trade, 'q', 'qty')
id = self.safe_string_2(trade, 'a', 'id')
side = None
orderId = self.safe_string(trade, 'orderId')
if 'm' in trade:
            side = 'sell' if trade['m'] else 'buy' # this is reversed intentionally
elif 'isBuyerMaker' in trade:
side = 'sell' if trade['isBuyerMaker'] else 'buy'
elif 'side' in trade:
side = self.safe_string_lower(trade, 'side')
else:
if 'isBuyer' in trade:
                side = 'buy' if trade['isBuyer'] else 'sell' # this is a true side
fee = None
if 'commission' in trade:
fee = {
'cost': self.safe_float(trade, 'commission'),
'currency': self.safe_currency_code(self.safe_string(trade, 'commissionAsset')),
}
takerOrMaker = None
if 'isMaker' in trade:
takerOrMaker = 'maker' if trade['isMaker'] else 'taker'
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
cost = None
if (price is not None) and (amount is not None):
cost = price * amount
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'fromId': 123, # ID to get aggregate trades from INCLUSIVE.
# 'startTime': 456, # Timestamp in ms to get aggregate trades from INCLUSIVE.
# 'endTime': 789, # Timestamp in ms to get aggregate trades until INCLUSIVE.
# 'limit': 500, # default = 500, maximum = 1000
}
defaultType = self.safe_string_2(self.options, 'fetchTrades', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
defaultMethod = None
if type == 'future':
defaultMethod = 'fapiPublicGetAggTrades'
elif type == 'delivery':
defaultMethod = 'dapiPublicGetAggTrades'
else:
defaultMethod = 'publicGetAggTrades'
method = self.safe_string(self.options, 'fetchTradesMethod', defaultMethod)
if method == 'publicGetAggTrades':
if since is not None:
request['startTime'] = since
# https://github.com/ccxt/ccxt/issues/6400
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
request['endTime'] = self.sum(since, 3600000)
if type == 'future':
method = 'fapiPublicGetAggTrades'
elif type == 'delivery':
method = 'dapiPublicGetAggTrades'
elif method == 'publicGetHistoricalTrades':
if type == 'future':
method = 'fapiPublicGetHistoricalTrades'
elif type == 'delivery':
method = 'dapiPublicGetHistoricalTrades'
if limit is not None:
request['limit'] = limit # default = 500, maximum = 1000
#
# Caveats:
        # - the default limit (500) applies only if no other parameters are set;
        #   trades up to the maximum limit may be returned to satisfy other parameters
        # - if both a limit and a time window are set and the window contains more
        #   trades than the limit, then the last trades from the window are returned
        # - the 'tradeId' accepted and returned by this method is the "aggregate" trade id,
        #   which is different from the actual trade id
        # - setting both fromId and a time window results in an error
        #   (a usage sketch follows after this method)
response = await getattr(self, method)(self.extend(request, query))
#
# aggregate trades
#
# [
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
# ]
#
# recent public trades and historical public trades
#
# [
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
# ]
#
return self.parse_trades(response, market, since, limit)
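    # A hedged usage sketch for the caveats noted above (illustrative values):
    # with the default publicGetAggTrades method, passing since selects a one-hour
    # window [since, since + 3600000], while passing 'fromId' in params pages by
    # aggregate trade id instead; the two must not be combined.
    #
    #     trades = await exchange.fetch_trades('BTC/USDT', since=1591478520000)
    #     more_trades = await exchange.fetch_trades('BTC/USDT', params={'fromId': 26129})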
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'PARTIALLY_FILLED': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'PENDING_CANCEL': 'canceling', # currently unused
'REJECTED': 'rejected',
'EXPIRED': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# spot
#
# {
# "symbol": "LTCBTC",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "0.0",
# "cummulativeQuoteQty": "0.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "icebergQty": "0.0",
# "time": 1499827319559,
# "updateTime": 1499827319559,
# "isWorking": True
# }
#
# futures
#
# {
# "symbol": "BTCUSDT",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "1.0",
# "cumQuote": "10.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "updateTime": 1499827319559
# }
#
status = self.parse_order_status(self.safe_string(order, 'status'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = None
if 'time' in order:
timestamp = self.safe_integer(order, 'time')
elif 'transactTime' in order:
timestamp = self.safe_integer(order, 'transactTime')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'origQty')
filled = self.safe_float(order, 'executedQty')
remaining = None
# - Spot/Margin market: cummulativeQuoteQty
# - Futures market: cumQuote.
        # Note this is not the actual cost, since Binance futures uses leverage to calculate margins.
cost = self.safe_float_2(order, 'cummulativeQuoteQty', 'cumQuote')
if filled is not None:
if amount is not None:
remaining = amount - filled
if self.options['parseOrderToPrecision']:
remaining = float(self.amount_to_precision(symbol, remaining))
remaining = max(remaining, 0.0)
if price is not None:
if cost is None:
cost = price * filled
id = self.safe_string(order, 'orderId')
type = self.safe_string_lower(order, 'type')
if type == 'market':
if price == 0.0:
if (cost is not None) and (filled is not None):
if (cost > 0) and (filled > 0):
price = cost / filled
if self.options['parseOrderToPrecision']:
price = float(self.price_to_precision(symbol, price))
elif type == 'limit_maker':
type = 'limit'
side = self.safe_string_lower(order, 'side')
fee = None
trades = None
fills = self.safe_value(order, 'fills')
if fills is not None:
trades = self.parse_trades(fills, market)
numTrades = len(trades)
if numTrades > 0:
cost = trades[0]['cost']
fee = {
'cost': trades[0]['fee']['cost'],
'currency': trades[0]['fee']['currency'],
}
for i in range(1, len(trades)):
cost = self.sum(cost, trades[i]['cost'])
fee['cost'] = self.sum(fee['cost'], trades[i]['fee']['cost'])
average = None
if cost is not None:
if filled:
average = cost / filled
if self.options['parseOrderToPrecision']:
average = float(self.price_to_precision(symbol, average))
if self.options['parseOrderToPrecision']:
cost = float(self.cost_to_precision(symbol, cost))
clientOrderId = self.safe_string(order, 'clientOrderId')
timeInForce = self.safe_string(order, 'timeInForce')
postOnly = (type == 'limit_maker') or (timeInForce == 'GTX')
stopPrice = self.safe_float(order, 'stopPrice')
return {
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'createOrder', 'defaultType', market['type'])
orderType = self.safe_string(params, 'type', defaultType)
clientOrderId = self.safe_string_2(params, 'newClientOrderId', 'clientOrderId')
params = self.omit(params, ['type', 'newClientOrderId', 'clientOrderId'])
method = 'privatePostOrder'
if orderType == 'future':
method = 'fapiPrivatePostOrder'
elif orderType == 'delivery':
method = 'dapiPrivatePostOrder'
elif orderType == 'margin':
method = 'sapiPostMarginOrder'
        # the next 5 lines add support for testing orders
if market['spot']:
test = self.safe_value(params, 'test', False)
if test:
method += 'Test'
params = self.omit(params, 'test')
uppercaseType = type.upper()
validOrderTypes = self.safe_value(market['info'], 'orderTypes')
if not self.in_array(uppercaseType, validOrderTypes):
raise InvalidOrder(self.id + ' ' + type + ' is not a valid order type in ' + market['type'] + ' market ' + symbol)
request = {
'symbol': market['id'],
'type': uppercaseType,
'side': side.upper(),
}
if clientOrderId is not None:
request['newClientOrderId'] = clientOrderId
if market['spot']:
request['newOrderRespType'] = self.safe_value(self.options['newOrderRespType'], type, 'RESULT') # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
# additional required fields depending on the order type
timeInForceIsRequired = False
priceIsRequired = False
stopPriceIsRequired = False
quantityIsRequired = False
#
# spot/margin
#
# LIMIT timeInForce, quantity, price
# MARKET quantity or quoteOrderQty
# STOP_LOSS quantity, stopPrice
# STOP_LOSS_LIMIT timeInForce, quantity, price, stopPrice
# TAKE_PROFIT quantity, stopPrice
# TAKE_PROFIT_LIMIT timeInForce, quantity, price, stopPrice
# LIMIT_MAKER quantity, price
#
# futures
#
# LIMIT timeInForce, quantity, price
# MARKET quantity
# STOP/TAKE_PROFIT quantity, price, stopPrice
# STOP_MARKET stopPrice
# TAKE_PROFIT_MARKET stopPrice
# TRAILING_STOP_MARKET callbackRate
#
if uppercaseType == 'MARKET':
quoteOrderQty = self.safe_value(self.options, 'quoteOrderQty', False)
if quoteOrderQty:
quoteOrderQty = self.safe_float(params, 'quoteOrderQty')
precision = market['precision']['price']
if quoteOrderQty is not None:
request['quoteOrderQty'] = self.decimal_to_precision(quoteOrderQty, TRUNCATE, precision, self.precisionMode)
params = self.omit(params, 'quoteOrderQty')
elif price is not None:
request['quoteOrderQty'] = self.decimal_to_precision(amount * price, TRUNCATE, precision, self.precisionMode)
else:
quantityIsRequired = True
else:
quantityIsRequired = True
elif uppercaseType == 'LIMIT':
priceIsRequired = True
timeInForceIsRequired = True
quantityIsRequired = True
elif (uppercaseType == 'STOP_LOSS') or (uppercaseType == 'TAKE_PROFIT'):
stopPriceIsRequired = True
quantityIsRequired = True
if market['future']:
priceIsRequired = True
elif (uppercaseType == 'STOP_LOSS_LIMIT') or (uppercaseType == 'TAKE_PROFIT_LIMIT'):
quantityIsRequired = True
stopPriceIsRequired = True
priceIsRequired = True
timeInForceIsRequired = True
elif uppercaseType == 'LIMIT_MAKER':
priceIsRequired = True
quantityIsRequired = True
elif uppercaseType == 'STOP':
quantityIsRequired = True
stopPriceIsRequired = True
priceIsRequired = True
elif (uppercaseType == 'STOP_MARKET') or (uppercaseType == 'TAKE_PROFIT_MARKET'):
closePosition = self.safe_value(params, 'closePosition')
if closePosition is None:
quantityIsRequired = True
stopPriceIsRequired = True
elif uppercaseType == 'TRAILING_STOP_MARKET':
quantityIsRequired = True
callbackRate = self.safe_float(params, 'callbackRate')
if callbackRate is None:
raise InvalidOrder(self.id + ' createOrder method requires a callbackRate extra param for a ' + type + ' order')
if quantityIsRequired:
request['quantity'] = self.amount_to_precision(symbol, amount)
if priceIsRequired:
if price is None:
raise InvalidOrder(self.id + ' createOrder method requires a price argument for a ' + type + ' order')
request['price'] = self.price_to_precision(symbol, price)
if timeInForceIsRequired:
request['timeInForce'] = self.options['defaultTimeInForce'] # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
if stopPriceIsRequired:
stopPrice = self.safe_float(params, 'stopPrice')
if stopPrice is None:
raise InvalidOrder(self.id + ' createOrder method requires a stopPrice extra param for a ' + type + ' order')
else:
params = self.omit(params, 'stopPrice')
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
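    # Hedged usage sketches for the per-type parameter requirements listed above
    # (illustrative symbols, amounts and prices, not part of this method):
    #
    #     # spot market buy by quote amount, relies on options['quoteOrderQty']
    #     order = await exchange.create_order('BTC/USDT', 'market', 'buy', None, None, {'quoteOrderQty': 100})
    #
    #     # spot stop-loss-limit sell: stopPrice goes in params, price is the limit price
    #     order = await exchange.create_order('BTC/USDT', 'STOP_LOSS_LIMIT', 'sell', 0.001, 19000, {'stopPrice': 19500})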
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetOrder'
if type == 'future':
method = 'fapiPrivateGetOrder'
elif type == 'delivery':
method = 'dapiPrivateGetOrder'
elif type == 'margin':
method = 'sapiGetMarginOrder'
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
if clientOrderId is not None:
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
query = self.omit(params, ['type', 'clientOrderId', 'origClientOrderId'])
response = await getattr(self, method)(self.extend(request, query))
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrders', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetAllOrders'
if type == 'future':
method = 'fapiPrivateGetAllOrders'
elif type == 'delivery':
method = 'dapiPrivateGetAllOrders'
elif type == 'margin':
method = 'sapiGetMarginAllOrders'
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "LTCBTC",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "0.0",
# "cummulativeQuoteQty": "0.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "icebergQty": "0.0",
# "time": 1499827319559,
# "updateTime": 1499827319559,
# "isWorking": True
# }
# ]
#
# futures
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "1.0",
# "cumQuote": "10.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "updateTime": 1499827319559
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
query = None
type = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
elif self.options['warnOnFetchOpenOrdersWithoutSymbol']:
symbols = self.symbols
numSymbols = len(symbols)
fetchOpenOrdersRateLimit = int(numSymbols / 2)
            raise ExchangeError(self.id + ' fetchOpenOrders WARNING: fetching open orders without specifying a symbol is rate-limited to one call per ' + str(fetchOpenOrdersRateLimit) + ' seconds. Do not call this method frequently to avoid ban. Set ' + self.id + '.options["warnOnFetchOpenOrdersWithoutSymbol"] = False to suppress this warning message.')
else:
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateGetOpenOrders'
if type == 'future':
method = 'fapiPrivateGetOpenOrders'
elif type == 'delivery':
method = 'dapiPrivateGetOpenOrders'
elif type == 'margin':
method = 'sapiGetMarginOpenOrders'
response = await getattr(self, method)(self.extend(request, query))
return self.parse_orders(response, market, since, limit)
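    # A hedged note on the warning above: fetching open orders for one symbol is
    # cheap, while the symbol-less form queries every market and carries a much
    # higher request weight. Illustrative only:
    #
    #     open_orders = await exchange.fetch_open_orders('BTC/USDT')
    #     # or, accepting the extra cost and silencing the warning:
    #     exchange.options['warnOnFetchOpenOrdersWithoutSymbol'] = False
    #     all_open_orders = await exchange.fetch_open_orders()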
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
# https://github.com/ccxt/ccxt/issues/6507
origClientOrderId = self.safe_value_2(params, 'origClientOrderId', 'clientOrderId')
request = {
'symbol': market['id'],
# 'orderId': id,
# 'origClientOrderId': id,
}
if origClientOrderId is None:
request['orderId'] = id
else:
request['origClientOrderId'] = origClientOrderId
method = 'privateDeleteOrder'
if type == 'future':
method = 'fapiPrivateDeleteOrder'
elif type == 'delivery':
method = 'dapiPrivateDeleteOrder'
elif type == 'margin':
method = 'sapiDeleteMarginOrder'
query = self.omit(params, ['type', 'origClientOrderId', 'clientOrderId'])
response = await getattr(self, method)(self.extend(request, query))
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
defaultType = self.safe_string_2(self.options, 'cancelAllOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateDeleteOpenOrders'
if type == 'future':
method = 'fapiPrivateDeleteAllOpenOrders'
elif type == 'delivery':
method = 'dapiPrivateDeleteAllOpenOrders'
response = await getattr(self, method)(self.extend(request, query))
if isinstance(response, list):
return self.parse_orders(response, market)
else:
return response
async def fetch_positions(self, symbols=None, since=None, limit=None, params={}):
await self.load_markets()
response = await self.fetch_balance(params)
info = self.safe_value(response, 'info', {})
#
# futures, delivery
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "assets":[
# {
# "asset":"ETH",
# "walletBalance":"0.09886711",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"0.09886711",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"0.09886711",
# "crossWalletBalance":"0.09886711",
# "crossUnPnl":"0.00000000",
# "availableBalance":"0.09886711"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSD_201225",
# "initialMargin":"0",
# "maintMargin":"0",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0",
# "openOrderInitialMargin":"0",
# "leverage":"20",
# "isolated":false,
# "positionSide":"BOTH",
# "entryPrice":"0.00000000",
# "maxQty":"250", # "maxNotional" on futures
# },
# ]
# }
#
positions = self.safe_value_2(info, 'positions', 'userAssets', [])
# todo unify parsePosition/parsePositions
return positions
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = None
if type == 'spot':
method = 'privateGetMyTrades'
elif type == 'future':
method = 'fapiPrivateGetUserTrades'
elif type == 'delivery':
method = 'dapiPrivateGetUserTrades'
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# spot trade
#
# [
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True,
# }
# ]
#
# futures trade
#
# [
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def fetch_my_dust_trades(self, symbol=None, since=None, limit=None, params={}):
#
        # Binance allows trading insignificant (i.e. non-tradable and non-withdrawable)
        # token leftovers of any asset into `BNB`, which in turn can be used to pay trading fees.
        # The corresponding trade history is called the `Dust Log` and can be requested via the following endpoint:
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
#
await self.load_markets()
response = await self.wapiGetUserAssetDribbletLog(params)
# {success: True,
# results: {total: 1,
# rows: [{ transfered_total: "1.06468458",
# service_charge_total: "0.02172826",
# tran_id: 2701371634,
# logs: [{ tranId: 2701371634,
# serviceChargeAmount: "0.00012819",
# uid: "35103861",
# amount: "0.8012",
# operateTime: "2018-10-07 17:56:07",
# transferedAmount: "0.00628141",
# fromAsset: "ADA" }],
# operate_time: "2018-10-07 17:56:06" }]}}
results = self.safe_value(response, 'results', {})
rows = self.safe_value(results, 'rows', [])
data = []
for i in range(0, len(rows)):
logs = rows[i]['logs']
for j in range(0, len(logs)):
logs[j]['isDustTrade'] = True
data.append(logs[j])
trades = self.parse_trades(data, None, since, limit)
return self.filter_by_since_limit(trades, since, limit)
def parse_dust_trade(self, trade, market=None):
# { tranId: 2701371634,
# serviceChargeAmount: "0.00012819",
# uid: "35103861",
# amount: "0.8012",
# operateTime: "2018-10-07 17:56:07",
# transferedAmount: "0.00628141",
# fromAsset: "ADA" },
orderId = self.safe_string(trade, 'tranId')
timestamp = self.parse8601(self.safe_string(trade, 'operateTime'))
tradedCurrency = self.safe_currency_code(self.safe_string(trade, 'fromAsset'))
earnedCurrency = self.currency('BNB')['code']
applicantSymbol = earnedCurrency + '/' + tradedCurrency
tradedCurrencyIsQuote = False
if applicantSymbol in self.markets:
tradedCurrencyIsQuote = True
#
# Warning
# Binance dust trade `fee` is already excluded from the `BNB` earning reported in the `Dust Log`.
# So the parser should either set the `fee.cost` to `0` or add it on top of the earned
# BNB `amount`(or `cost` depending on the trade `side`). The second of the above options
# is much more illustrative and therefore preferable.
#
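        # Worked example with the sample row above (illustrative arithmetic):
        # 0.8012 ADA was converted into a transferedAmount of 0.00628141 BNB after
        # a serviceChargeAmount of 0.00012819 BNB, so the parser reports a sell of
        # 0.8012 ADA for a gross cost of 0.00628141 + 0.00012819 = 0.00640960 BNB,
        # with fee.cost = 0.00012819 BNB.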
fee = {
'currency': earnedCurrency,
'cost': self.safe_float(trade, 'serviceChargeAmount'),
}
symbol = None
amount = None
cost = None
side = None
if tradedCurrencyIsQuote:
symbol = applicantSymbol
amount = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
cost = self.safe_float(trade, 'amount')
side = 'buy'
else:
symbol = tradedCurrency + '/' + earnedCurrency
amount = self.safe_float(trade, 'amount')
cost = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
side = 'sell'
price = None
if cost is not None:
if amount:
price = cost / amount
id = None
type = None
takerOrMaker = None
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'amount': amount,
'price': price,
'cost': cost,
'fee': fee,
'info': trade,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTime'] = since
# max 3 months range https://github.com/ccxt/ccxt/issues/6495
request['endTime'] = self.sum(since, 7776000000)
response = await self.wapiGetDepositHistory(self.extend(request, params))
#
# { success: True,
# depositList: [{insertTime: 1517425007000,
# amount: 0.3,
# address: "0x0123456789abcdef",
# addressTag: "",
# txId: "0x0123456789abcdef",
# asset: "ETH",
# status: 1 }]}
#
return self.parse_transactions(response['depositList'], currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTime'] = since
# max 3 months range https://github.com/ccxt/ccxt/issues/6495
request['endTime'] = self.sum(since, 7776000000)
response = await self.wapiGetWithdrawHistory(self.extend(request, params))
#
# {withdrawList: [{ amount: 14,
# address: "0x0123456789abcdef...",
# successTime: 1514489710000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ETH",
# applyTime: 1514488724000,
# status: 6 },
# { amount: 7600,
# address: "0x0123456789abcdef...",
# successTime: 1515323226000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ICN",
# applyTime: 1515322539000,
# status: 6 } ],
# success: True }
#
return self.parse_transactions(response['withdrawList'], currency, since, limit)
def parse_transaction_status_by_type(self, status, type=None):
statusesByType = {
'deposit': {
'0': 'pending',
'1': 'ok',
},
'withdrawal': {
'0': 'pending', # Email Sent
'1': 'canceled', # Cancelled(different from 1 = ok in deposits)
'2': 'pending', # Awaiting Approval
'3': 'failed', # Rejected
'4': 'pending', # Processing
'5': 'failed', # Failure
'6': 'ok', # Completed
},
}
statuses = self.safe_value(statusesByType, type, {})
return self.safe_string(statuses, status, status)
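        # Examples, assuming the mappings above: ('1', 'deposit') -> 'ok', ('6', 'withdrawal') -> 'ok',
        # ('3', 'withdrawal') -> 'failed'; an unrecognized status string is returned unchanged.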
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# insertTime: 1517425007000,
# amount: 0.3,
# address: "0x0123456789abcdef",
# addressTag: "",
# txId: "0x0123456789abcdef",
# asset: "ETH",
# status: 1
# }
#
# fetchWithdrawals
#
# {
# amount: 14,
# address: "0x0123456789abcdef...",
# successTime: 1514489710000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ETH",
# applyTime: 1514488724000,
# status: 6
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
        tag = self.safe_string(transaction, 'addressTag')  # may be an empty string, normalized to None below
if tag is not None:
if len(tag) < 1:
tag = None
txid = self.safe_string(transaction, 'txId')
if (txid is not None) and (txid.find('Internal transfer ') >= 0):
txid = txid[18:]
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
insertTime = self.safe_integer(transaction, 'insertTime')
applyTime = self.safe_integer(transaction, 'applyTime')
type = self.safe_string(transaction, 'type')
if type is None:
if (insertTime is not None) and (applyTime is None):
type = 'deposit'
timestamp = insertTime
elif (insertTime is None) and (applyTime is not None):
type = 'withdrawal'
timestamp = applyTime
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
amount = self.safe_float(transaction, 'amount')
feeCost = self.safe_float(transaction, 'transactionFee')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
updated = self.safe_integer(transaction, 'successTime')
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': tag,
'tagTo': tag,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.wapiGetDepositAddress(self.extend(request, params))
success = self.safe_value(response, 'success')
if (success is None) or not success:
raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response – create the deposit address in the user settings first.')
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'addressTag')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_funding_fees(self, codes=None, params={}):
response = await self.wapiGetAssetDetail(params)
#
# {
# "success": True,
# "assetDetail": {
# "CTR": {
# "minWithdrawAmount": "70.00000000", #min withdraw amount
# "depositStatus": False,//deposit status
# "withdrawFee": 35, # withdraw fee
# "withdrawStatus": True, #withdraw status
# "depositTip": "Delisted, Deposit Suspended" #reason
# },
# "SKY": {
# "minWithdrawAmount": "0.02000000",
# "depositStatus": True,
# "withdrawFee": 0.01,
# "withdrawStatus": True
# }
# }
# }
#
detail = self.safe_value(response, 'assetDetail', {})
ids = list(detail.keys())
withdrawFees = {}
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
withdrawFees[code] = self.safe_float(detail[id], 'withdrawFee')
return {
'withdraw': withdrawFees,
'deposit': {},
'info': response,
}
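        # With the sample response above this returns roughly:
        # {'withdraw': {'CTR': 35.0, 'SKY': 0.01}, 'deposit': {}, 'info': response}
        # (codes may differ if safe_currency_code applies a common-currency mapping)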
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
        # name is optional and can be overridden via params
name = address[0:20]
request = {
'asset': currency['id'],
'address': address,
'amount': float(amount),
            'name': name,  # optional, can be overridden via params
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
# issue sapiGetCapitalConfigGetall() to get networks for withdrawing USDT ERC20 vs USDT Omni
# 'network': 'ETH', # 'BTC', 'TRX', etc, optional
}
if tag is not None:
request['addressTag'] = tag
response = await self.wapiPostWithdraw(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'id'),
}
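        # Hypothetical usage sketch (address and values are placeholders, not from the original file):
        #     await exchange.withdraw('USDT', 10.0, '0x0123456789abcdef', None, {'network': 'ETH'})
        # the optional 'network' override documented in the request comment above is passed through params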
def parse_trading_fee(self, fee, market=None):
#
# {
# "symbol": "ADABNB",
# "maker": 0.9000,
# "taker": 1.0000
# }
#
marketId = self.safe_string(fee, 'symbol')
symbol = self.safe_symbol(marketId)
return {
'info': fee,
'symbol': symbol,
'maker': self.safe_float(fee, 'maker'),
'taker': self.safe_float(fee, 'taker'),
}
async def fetch_trading_fee(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.wapiGetTradeFee(self.extend(request, params))
#
# {
# "tradeFee": [
# {
# "symbol": "ADABNB",
# "maker": 0.9000,
# "taker": 1.0000
# }
# ],
# "success": True
# }
#
tradeFee = self.safe_value(response, 'tradeFee', [])
first = self.safe_value(tradeFee, 0, {})
return self.parse_trading_fee(first)
async def fetch_trading_fees(self, params={}):
await self.load_markets()
response = await self.wapiGetTradeFee(params)
#
# {
# "tradeFee": [
# {
# "symbol": "ADABNB",
# "maker": 0.9000,
# "taker": 1.0000
# }
# ],
# "success": True
# }
#
tradeFee = self.safe_value(response, 'tradeFee', [])
result = {}
for i in range(0, len(tradeFee)):
fee = self.parse_trading_fee(tradeFee[i])
symbol = fee['symbol']
result[symbol] = fee
return result
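        # With the sample response above, and assuming markets are loaded so that safe_symbol
        # resolves 'ADABNB', the result is roughly:
        # {'ADA/BNB': {'info': {...}, 'symbol': 'ADA/BNB', 'maker': 0.9, 'taker': 1.0}}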
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
if not (api in self.urls['api']):
raise NotSupported(self.id + ' does not have a testnet/sandbox URL for ' + api + ' endpoints')
url = self.urls['api'][api]
url += '/' + path
if api == 'wapi':
url += '.html'
userDataStream = (path == 'userDataStream') or (path == 'listenKey')
if path == 'historicalTrades':
if self.apiKey:
headers = {
'X-MBX-APIKEY': self.apiKey,
}
else:
raise AuthenticationError(self.id + ' historicalTrades endpoint requires `apiKey` credential')
elif userDataStream:
if self.apiKey:
# v1 special case for userDataStream
body = self.urlencode(params)
headers = {
'X-MBX-APIKEY': self.apiKey,
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
raise AuthenticationError(self.id + ' userDataStream endpoint requires `apiKey` credential')
if (api == 'private') or (api == 'sapi') or (api == 'wapi' and path != 'systemStatus') or (api == 'dapiPrivate') or (api == 'fapiPrivate') or (api == 'fapiPrivateV2'):
self.check_required_credentials()
query = None
recvWindow = self.safe_integer(self.options, 'recvWindow', 5000)
if (api == 'sapi') and (path == 'asset/dust'):
query = self.urlencode_with_array_repeat(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
elif (path == 'batchOrders') or (path.find('sub-account') >= 0):
query = self.rawencode(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
else:
query = self.urlencode(self.extend({
'timestamp': self.nonce(),
'recvWindow': recvWindow,
}, params))
signature = self.hmac(self.encode(query), self.encode(self.secret))
query += '&' + 'signature=' + signature
headers = {
'X-MBX-APIKEY': self.apiKey,
}
if (method == 'GET') or (method == 'DELETE') or (api == 'wapi'):
url += '?' + query
else:
body = query
headers['Content-Type'] = 'application/x-www-form-urlencoded'
else:
            # userDataStream endpoints are public, but use POST, PUT and DELETE,
            # so they don't accept URL query arguments
# https://github.com/ccxt/ccxt/issues/5224
if not userDataStream:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
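        # Illustrative shape of a private request produced by sign() (sketch, values are placeholders):
        #     GET <api base>/<path>?timestamp=<nonce>&recvWindow=5000&...&signature=<HMAC (SHA-256 by default) of the query string keyed with the secret>
        #     headers: {'X-MBX-APIKEY': <apiKey>}
        # non-GET/DELETE private requests send the same signed query as an
        # application/x-www-form-urlencoded body instead of URL parameters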
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 418) or (code == 429):
raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
# error response in a form: {"code": -1013, "msg": "Invalid quantity."}
        # the following block contains legacy checks against message patterns in the "msg" property
        # these will be switched to "code"-based checks eventually, once all of them are known
if code >= 400:
if body.find('Price * QTY is zero or less') >= 0:
raise InvalidOrder(self.id + ' order cost = amount * price is zero or less ' + body)
if body.find('LOT_SIZE') >= 0:
raise InvalidOrder(self.id + ' order amount should be evenly divisible by lot size ' + body)
if body.find('PRICE_FILTER') >= 0:
                raise InvalidOrder(self.id + ' order price is invalid, i.e. exceeds allowed price precision, exceeds min price or max price limits or is invalid float value in general, use self.price_to_precision(symbol, price) ' + body)
if response is None:
return # fallback to default error handler
# check success value for wapi endpoints
# response in format {'msg': 'The coin does not exist.', 'success': True/false}
success = self.safe_value(response, 'success', True)
if not success:
message = self.safe_string(response, 'msg')
parsedMessage = None
if message is not None:
try:
parsedMessage = json.loads(message)
except Exception as e:
# do nothing
parsedMessage = None
if parsedMessage is not None:
response = parsedMessage
message = self.safe_string(response, 'msg')
if message is not None:
self.throw_exactly_matched_exception(self.exceptions, message, self.id + ' ' + message)
# checks against error codes
error = self.safe_string(response, 'code')
if error is not None:
# https://github.com/ccxt/ccxt/issues/6501
# https://github.com/ccxt/ccxt/issues/7742
if (error == '200') or (error == '0'):
return
            # a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
            # the message is confusing, but Binance raises it on a temporary ban:
            # the API key is valid, just disabled for a while
if (error == '-2015') and self.options['hasAlreadyAuthenticatedSuccessfully']:
raise DDoSProtection(self.id + ' temporary banned: ' + body)
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, error, feedback)
raise ExchangeError(feedback)
if not success:
raise ExchangeError(self.id + ' ' + body)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
# a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
if (api == 'private') or (api == 'wapi'):
self.options['hasAlreadyAuthenticatedSuccessfully'] = True
return response
| 45.27789 | 355 | 0.478034 |
b829d0031ef2cca9076c7685d49f9c94bad9b50c | 4,691 | py | Python | textattack/attention_models/han.py | MINGG2333/TextAttack | 7667d80f52140aa3cea83b63ce404cad17561dbb | ["MIT"] | null | null | null | textattack/attention_models/han.py | MINGG2333/TextAttack | 7667d80f52140aa3cea83b63ce404cad17561dbb | ["MIT"] | null | null | null | textattack/attention_models/han.py | MINGG2333/TextAttack | 7667d80f52140aa3cea83b63ce404cad17561dbb | ["MIT"] | null | null | null |
import torch
from torch import nn
from .utils import preprocess, rev_label_map
import json
import os
import gzip
import sys
import io
import re
import random
import csv
import numpy as np
from nltk.tokenize import PunktSentenceTokenizer, TreebankWordTokenizer
from .han_model import HierarchialAttentionNetwork
#from PIL import Image, ImageDraw, ImageFont
csv.field_size_limit(sys.maxsize)
n_classes = 2
word_rnn_size = 50 # word RNN size
sentence_rnn_size = 50 # sentence RNN size
word_rnn_layers = 1 # number of layers in word RNN
sentence_rnn_layers = 1 # number of layers in sentence RNN
word_att_size = 100 # size of the word-level attention layer (also the size of the word context vector)
sentence_att_size = 100 # size of the sentence-level attention layer (also the size of the sentence context vector)
dropout = 0.3 # dropout
fine_tune_word_embeddings = True # fine-tune word embeddings?
class HAN():
def __init__(self, path=None):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.data_folder = os.path.dirname(path)
with open(os.path.join(self.data_folder, 'word_map.json'), 'r') as j:
self.word_map = json.load(j)
self.model = HierarchialAttentionNetwork(n_classes=n_classes,
vocab_size=len(self.word_map),
emb_size=200,
word_rnn_size=word_rnn_size,
sentence_rnn_size=sentence_rnn_size,
word_rnn_layers=word_rnn_layers,
sentence_rnn_layers=sentence_rnn_layers,
word_att_size=word_att_size,
sentence_att_size=sentence_att_size,
dropout=dropout)
# self.model.load_state_dict(torch.load(path, map_location=self.device))
self.model = self.model.to(self.device)
self.model.eval()
    def classify(self, document):
"""
Classify a document with the Hierarchial Attention Network (HAN).
:param document: a document in text form
:return: pre-processed tokenized document, class scores, attention weights for words, attention weights for sentences, sentence lengths
"""
sentence_limit = 1000
word_limit = 1000
word_map = self.word_map
sent_tokenizer = PunktSentenceTokenizer()
word_tokenizer = TreebankWordTokenizer()
        model = self.model
        device = self.device
        # A list to store the document tokenized into words
        doc = list()
# Tokenize document into sentences
sentences = list()
for paragraph in preprocess(document).splitlines():
sentences.extend([s for s in sent_tokenizer.tokenize(paragraph)])
# Tokenize sentences into words
for s in sentences[:sentence_limit]:
w = s.split(" ")
w = w[:word_limit]
#w = word_tokenizer.tokenize(s)[:word_limit]
if len(w) == 0:
continue
doc.append(w)
# Number of sentences in the document
sentences_in_doc = len(doc)
sentences_in_doc = torch.LongTensor([sentences_in_doc]).to(device) # (1)
# Number of words in each sentence
words_in_each_sentence = list(map(lambda s: len(s), doc))
words_in_each_sentence = torch.LongTensor(words_in_each_sentence).unsqueeze(0).to(device) # (1, n_sentences)
# Encode document with indices from the word map
encoded_doc = list(
map(lambda s: list(map(lambda w: word_map.get(w, word_map['<unk>']), s)) + [0] * (word_limit - len(s)),
doc)) + [[0] * word_limit] * (sentence_limit - len(doc))
encoded_doc = torch.LongTensor(encoded_doc).unsqueeze(0).to(device)
# Apply the HAN model
scores, word_alphas, sentence_alphas = model(encoded_doc, sentences_in_doc,
words_in_each_sentence) # (1, n_classes), (1, n_sentences, max_sent_len_in_document), (1, n_sentences)
scores = scores.squeeze(0) # (n_classes)
scores = nn.functional.softmax(scores, dim=0) # (n_classes)
word_alphas = word_alphas.squeeze(0) # (n_sentences, max_sent_len_in_document)
sentence_alphas = sentence_alphas.squeeze(0) # (n_sentences)
words_in_each_sentence = words_in_each_sentence.squeeze(0) # (n_sentences)
return doc, scores, word_alphas, sentence_alphas
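    # Hypothetical usage sketch (checkpoint path and input text are placeholders, not from the original file):
    #     han = HAN(path='checkpoints/han_checkpoint.pth.tar')
    #     doc, scores, word_alphas, sentence_alphas = han.classify('A surprisingly good movie.')
    #     predicted_label = rev_label_map[scores.argmax().item()]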
| 42.645455 | 155 | 0.626093 |
6906f4bc1ab2c9bb0c5b90344d59239c8fce2686 | 5,829 | py | Python | website/views.py | hms-dbmi/cistrome-explorer-higlass-server | df811491c7bb757245d7e6804a0a54e356ded54e | ["MIT"] | 11 | 2016-11-03T20:19:15.000Z | 2018-08-28T09:49:02.000Z | website/views.py | hms-dbmi/cistrome-explorer-higlass-server | df811491c7bb757245d7e6804a0a54e356ded54e | ["MIT"] | 44 | 2016-12-12T23:57:13.000Z | 2018-06-27T19:32:36.000Z | website/views.py | hms-dbmi/cistrome-explorer-higlass-server | df811491c7bb757245d7e6804a0a54e356ded54e | ["MIT"] | 6 | 2016-11-09T20:11:04.000Z | 2018-05-29T21:44:38.000Z |
import subprocess
import pyppeteer
import asyncio
import logging
import os
import os.path as op
from pyppeteer import launch
import tempfile
import tilesets.models as tm
import higlass_server.settings as hss
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest, HttpResponse, \
HttpResponseNotFound, HttpResponseBadRequest
logger = logging.getLogger(__name__)
def link(request):
'''Generate a small page containing the metadata necessary for
link unfurling by Slack or Twitter. The generated page will
point to a screenshot of the rendered viewconf. The page will automatically
redirect to the rendering so that if anybody clicks on this link
they'll be taken to an interactive higlass view.
The viewconf to render should be specified with the d= html parameter.
Args:
request: The incoming http request.
Returns:
A response containing an html page with metadata
'''
# the uuid of the viewconf to render
uuid = request.GET.get('d')
if not uuid:
# if there's no uuid specified, return an empty page
return HttpResponseNotFound('<h1>No uuid specified</h1>')
try:
obj = tm.ViewConf.objects.get(uuid=uuid)
except ObjectDoesNotExist:
return HttpResponseNotFound('<h1>No such uuid</h1>')
# the url for the thumnbail
thumb_url=f'{request.scheme}://{request.get_host()}/thumbnail/?d={uuid}'
# the page to redirect to for interactive explorations
redirect_url=f'{request.scheme}://{request.get_host()}/app/?config={uuid}'
# Simple html page. Not a template just for simplicity's sake.
# If it becomes more complex, we can make it into a template.
html = f"""<html>
<meta charset="utf-8">
<meta name="author" content="Peter Kerpedjiev, Fritz Lekschas, Nezar Abdennur, Nils Gehlenborg">
<meta name="description" content="Web-based visual exploration and comparison of Hi-C genome interaction maps and other genomic tracks">
<meta name="keywords" content="3D genome, genomics, genome browser, Hi-C, 4DN, matrix visualization, cooler, Peter Kerpedjiev, Fritz Lekschas, Nils Gehlenborg, Harvard Medical School, Department of Biomedical Informatics">
<meta itemprop="name" content="HiGlass">
<meta itemprop="description" content="Web-based visual exploration and comparison of Hi-C genome interaction maps and other genomic tracks">
<meta itemprop="image" content="{thumb_url}">
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:site" content="@higlass_io">
<meta name="twitter:title" content="HiGlass">
<meta name="twitter:description" content="Web-based visual exploration and comparison of Hi-C genome interaction maps and other genomic tracks">
<meta name="twitter:creator" content="@flekschas"><meta name="twitter:image:src" content="{thumb_url}">
<meta property="og:title" content="HiGlass"/>
<meta property="og:description" content="Web-based visual exploration and comparison of Hi-C genome interaction maps and other genomic tracks"/>
<meta property="og:type" content="website"/><meta property="og:url" content="https://higlass.io"/>
<meta property="og:image" content="{thumb_url}"/>
<meta name="viewport" content="width=device-width,initial-scale=1,shrink-to-fit=no">
<meta name="theme-color" content="#0f5d92">
<body></body>
<script>
window.location.replace("{redirect_url}");
</script>
</html>
"""
return HttpResponse(html)
def thumbnail(request: HttpRequest):
'''Retrieve a thumbnail for the viewconf specified by the d=
parameter.
Args:
request: The incoming request.
Returns:
A response of either 404 if there's no uuid provided or an
image containing a screenshot of the rendered viewconf with
that uuid.
'''
uuid = request.GET.get('d')
base_url = f'{request.scheme}://localhost/app/'
if not uuid:
return HttpResponseNotFound('<h1>No uuid specified</h1>')
if '.' in uuid or '/' in uuid:
# no funny business
logger.warning('uuid contains . or /: %s', uuid)
return HttpResponseBadRequest("uuid can't contain . or /")
if not op.exists(hss.THUMBNAILS_ROOT):
os.makedirs(hss.THUMBNAILS_ROOT)
output_file = op.abspath(op.join(hss.THUMBNAILS_ROOT, uuid + ".png"))
thumbnails_base = op.abspath(hss.THUMBNAILS_ROOT)
if output_file.find(thumbnails_base) != 0:
logger.warning('Thumbnail file is not in thumbnail_base: %s uuid: %s',
output_file, uuid)
return HttpResponseBadRequest('Strange path')
if not op.exists(output_file):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(
screenshot(
base_url,
uuid,
output_file))
loop.close()
with open(output_file, 'rb') as file:
return HttpResponse(
file.read(),
content_type="image/jpeg")
async def screenshot(
base_url: str,
uuid: str,
output_file: str
):
'''Take a screenshot of a rendered viewconf.
Args:
base_url: The url to use for rendering the viewconf
uuid: The uuid of the viewconf to render
output_file: The location on the local filesystem to cache
the thumbnail.
Returns:
Nothing, just stores the screenshot at the given location.
'''
browser = await launch(
headless=True,
args=['--no-sandbox'],
handleSIGINT=False,
handleSIGTERM=False,
handleSIGHUP=False
)
url = f'{base_url}?config={uuid}'
page = await browser.newPage()
await page.goto(url, {
'waitUntil': 'networkidle0',
})
await page.screenshot({'path': output_file})
await browser.close()
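    # Illustrative request flow (summary comment, not additional code): GET /thumbnail/?d=<uuid>
    # renders /app/?config=<uuid> headlessly via pyppeteer, caches the screenshot as
    # THUMBNAILS_ROOT/<uuid>.png and serves the cached file on later requests, while the
    # link() view returns a small unfurling page that embeds that thumbnail URL.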
| 36.43125 | 222 | 0.687768 |
492991700d662af55cd6b2170303b1cdd10731bc | 59 | py | Python | 2 semester/PW/Coursera/Python/3 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | ["MIT"] | 1 | 2022-02-06T17:50:25.000Z | 2022-02-06T17:50:25.000Z | 2 semester/PW/Coursera/Python/3 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | ["MIT"] | null | null | null | 2 semester/PW/Coursera/Python/3 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | ["MIT"] | 1 | 2022-03-02T06:45:06.000Z | 2022-03-02T06:45:06.000Z |
s = input()
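# keep everything before the first "h" and everything after the last "h",
# i.e. print s with the span from the first to the last "h" (inclusive) removed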
print(s[:s.find("h")] + s[s.rfind("h") + 1:])
| 14.75 | 45 | 0.457627 |
92a333fd2ebecaecff76ca75b4ce6e3e48b68c0a | 272 | py | Python | setup.py | RajeshNathani/easy-nlp | 915c60a46e523684a3730cf5b7574c14f1c71c4b | ["MIT"] | 1 | 2020-08-24T12:28:06.000Z | 2020-08-24T12:28:06.000Z | setup.py | RajeshNathani/easy-nlp | 915c60a46e523684a3730cf5b7574c14f1c71c4b | ["MIT"] | null | null | null | setup.py | RajeshNathani/easy-nlp | 915c60a46e523684a3730cf5b7574c14f1c71c4b | ["MIT"] | null | null | null |
from setuptools import setup
setup(
    name='easy nlp',
    version='0.1.1',
    description="NLP made easy.",
    author="Rajesh Nathani",
    author_email="[email protected]",
    license="MIT",
    install_requires=['spacy==2.2'],
    packages=['easy_nlp'],
    zip_safe=False,
)
| 27.2 | 44 | 0.698529 |