Dataset schema (column, dtype, observed range):

    hexsha                                      string     length 40
    size                                        int64      4 to 1.02M
    ext                                         string     8 distinct values
    lang                                        string     1 distinct value
    max_stars_repo_path                         string     length 4 to 209
    max_stars_repo_name                         string     length 5 to 121
    max_stars_repo_head_hexsha                  string     length 40
    max_stars_repo_licenses                     sequence   length 1 to 10
    max_stars_count                             int64      1 to 191k
    max_stars_repo_stars_event_min_datetime     string     length 24
    max_stars_repo_stars_event_max_datetime     string     length 24
    max_issues_repo_path                        string     length 4 to 209
    max_issues_repo_name                        string     length 5 to 121
    max_issues_repo_head_hexsha                 string     length 40
    max_issues_repo_licenses                    sequence   length 1 to 10
    max_issues_count                            int64      1 to 67k
    max_issues_repo_issues_event_min_datetime   string     length 24
    max_issues_repo_issues_event_max_datetime   string     length 24
    max_forks_repo_path                         string     length 4 to 209
    max_forks_repo_name                         string     length 5 to 121
    max_forks_repo_head_hexsha                  string     length 40
    max_forks_repo_licenses                     sequence   length 1 to 10
    max_forks_count                             int64      1 to 105k
    max_forks_repo_forks_event_min_datetime     string     length 24
    max_forks_repo_forks_event_max_datetime     string     length 24
    content                                     string     length 4 to 1.02M
    avg_line_length                             float64    1.07 to 66.1k
    max_line_length                             int64      4 to 266k
    alphanum_fraction                           float64    0.01 to 1
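The columns above describe one source file per row, plus star, issue and fork metadata for the repositories that contain it. As a minimal sketch of how a split with this schema could be inspected, assuming the Hugging Face datasets library and a placeholder dataset id (user/python-code-corpus is hypothetical, not the real name of this corpus), one might stream a few rows and filter on the numeric columns:

from datasets import load_dataset  # Hugging Face `datasets` library

# "user/python-code-corpus" is a hypothetical dataset id used only for illustration.
ds = load_dataset("user/python-code-corpus", split="train", streaming=True)

filtered = (
    row
    for row in ds
    if (row["max_stars_count"] or 0) >= 10          # count columns can be null
    and row["alphanum_fraction"] > 0.4
    and row["max_line_length"] < 1000
)

for row, _ in zip(filtered, range(3)):              # peek at three matching rows
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])

Streaming avoids downloading the full corpus, and the null handling mirrors the nullable count columns visible in the example records below.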

Example 1

hexsha: 283187b1acdcf513705c7326dc53d32846d75468
size: 1,054
ext: py
lang: Python
max_stars_repo_path: image.py
max_stars_repo_name: harshkothari410/snn-image-segmentation
max_stars_repo_head_hexsha: 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2016-04-17T21:11:41.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-25T09:40:40.000Z
max_issues_repo_path: image.py
max_issues_repo_name: Arthas1121/snn-image-segmentation
max_issues_repo_head_hexsha: 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: image.py
max_forks_repo_name: Arthas1121/snn-image-segmentation
max_forks_repo_head_hexsha: 18fb28e8b2fee3d7583f6e62fd512ba90863c0ee
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 6
max_forks_repo_forks_event_min_datetime: 2016-04-17T19:14:41.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-09T21:03:12.000Z

content:
from PIL import Image

def imageread(filename):
    file = Image.open(filename)
    pixel_values = list(file.getdata())

    # Compute H and W
    w, h = file.size

    # compute pixel matrix
    pixel_mat = [[0 for x in xrange(w)] for x in xrange(h)]
    count = 0
    for x in xrange(h):
        for y in xrange(w):
            # print type(pixel_values[count])
            try:
                if len( pixel_values[count] ) > 1:
                    pixel_mat[x][y] = pixel_values[count][0]  # check whether is
                else:
                    pixel_mat[x][y] = pixel_values[count]
                count+=1
            except:
                pixel_mat[x][y] = pixel_values[count]
                count+=1
    return pixel_mat, w, h

def imagewrite(data, w, h):
    final_ans = []
    count = 0
    for x in xrange(h):
        for y in xrange(w):
            final_ans.append( data[x][y] )
            count+=1
    im = Image.new('1', (w,h))
    # print im
    im.putdata(final_ans)
    im.show()

def imagesave(data, w, h, name):
    final_ans = []
    count = 0
    for x in xrange(h):
        for y in xrange(w):
            final_ans.append( data[x][y] )
            count+=1
    im = Image.new('1', (w,h))
    # print im
    im.putdata(final_ans)
    im.save(name+'.jpg')

avg_line_length: 19.163636
max_line_length: 64
alphanum_fraction: 0.622391
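The image.py sample above is Python 2 code (xrange, a bare except). A minimal Python 3 sketch of the same grayscale read, assuming Pillow is installed; converting to mode "L" removes the need for the per-pixel tuple check:

from PIL import Image

def image_to_matrix(filename):
    # Mode "L" yields one integer per pixel, so the tuple/scalar check
    # from the original sample is no longer needed.
    img = Image.open(filename).convert("L")
    w, h = img.size
    values = list(img.getdata())  # pixels in row-major order: h rows of w values
    pixel_mat = [values[row * w:(row + 1) * w] for row in range(h)]
    return pixel_mat, w, h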

Example 2

hexsha: 862c998692f1850567159b1010a13f98027238a1
size: 2,774
ext: py
lang: Python
max_stars_repo_path: nipype/interfaces/camino/tests/test_auto_TrackPICo.py
max_stars_repo_name: moloney/nipype
max_stars_repo_head_hexsha: a7a9c85c79cb1412ba03406074f83200447ef50b
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2017-02-17T08:54:26.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-10T20:57:23.000Z
max_issues_repo_path: nipype/interfaces/camino/tests/test_auto_TrackPICo.py
max_issues_repo_name: moloney/nipype
max_issues_repo_head_hexsha: a7a9c85c79cb1412ba03406074f83200447ef50b
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2016-04-25T15:07:09.000Z
max_issues_repo_issues_event_max_datetime: 2016-04-25T15:07:09.000Z
max_forks_repo_path: nipype/interfaces/camino/tests/test_auto_TrackPICo.py
max_forks_repo_name: moloney/nipype
max_forks_repo_head_hexsha: a7a9c85c79cb1412ba03406074f83200447ef50b
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2017-09-23T16:22:00.000Z
max_forks_repo_forks_event_max_datetime: 2019-08-01T14:18:52.000Z

content:
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..dti import TrackPICo


def test_TrackPICo_inputs():
    input_map = dict(
        anisfile=dict(argstr='-anisfile %s', ),
        anisthresh=dict(argstr='-anisthresh %f', ),
        args=dict(argstr='%s', ),
        curveinterval=dict(
            argstr='-curveinterval %f',
            requires=['curvethresh'],
        ),
        curvethresh=dict(argstr='-curvethresh %f', ),
        data_dims=dict(
            argstr='-datadims %s',
            units='voxels',
        ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        gzip=dict(argstr='-gzip', ),
        ignore_exception=dict(
            deprecated='1.0.0',
            nohash=True,
            usedefault=True,
        ),
        in_file=dict(
            argstr='-inputfile %s',
            position=1,
        ),
        inputdatatype=dict(argstr='-inputdatatype %s', ),
        inputmodel=dict(
            argstr='-inputmodel %s',
            usedefault=True,
        ),
        interpolator=dict(argstr='-interpolator %s', ),
        ipthresh=dict(argstr='-ipthresh %f', ),
        iterations=dict(
            argstr='-iterations %d',
            units='NA',
        ),
        maxcomponents=dict(
            argstr='-maxcomponents %d',
            units='NA',
        ),
        numpds=dict(
            argstr='-numpds %d',
            units='NA',
        ),
        out_file=dict(
            argstr='-outputfile %s',
            genfile=True,
            position=-1,
        ),
        output_root=dict(
            argstr='-outputroot %s',
            position=-1,
        ),
        outputtracts=dict(argstr='-outputtracts %s', ),
        pdf=dict(argstr='-pdf %s', ),
        seed_file=dict(
            argstr='-seedfile %s',
            position=2,
        ),
        stepsize=dict(
            argstr='-stepsize %f',
            requires=['tracker'],
        ),
        terminal_output=dict(
            deprecated='1.0.0',
            nohash=True,
        ),
        tracker=dict(
            argstr='-tracker %s',
            usedefault=True,
        ),
        voxel_dims=dict(
            argstr='-voxeldims %s',
            units='mm',
        ),
    )
    inputs = TrackPICo.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value


def test_TrackPICo_outputs():
    output_map = dict(tracked=dict(), )
    outputs = TrackPICo.output_spec()

    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value

avg_line_length: 28.597938
max_line_length: 67
alphanum_fraction: 0.503965
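The auto-generated test above follows a single pattern: a literal metadata dict is compared, key by key, against the trait metadata exposed by the interface spec. A self-contained sketch of that comparison pattern, with a dummy spec standing in for the real nipype spec (DummySpec and DummyTrait are purely illustrative):

# Illustrative only: DummySpec stands in for a nipype input spec, and
# traits() here returns plain objects rather than real traits.
class DummyTrait:
    def __init__(self, **metadata):
        for key, value in metadata.items():
            setattr(self, key, value)

class DummySpec:
    def traits(self):
        return {
            "in_file": DummyTrait(argstr="-inputfile %s", position=1),
            "tracker": DummyTrait(argstr="-tracker %s", usedefault=True),
        }

def check_spec(spec, expected):
    # Same shape as the generated nipype tests: every expected
    # metadata key/value pair must match what the spec exposes.
    for name, metadata in expected.items():
        for metakey, value in metadata.items():
            assert getattr(spec.traits()[name], metakey) == value

check_spec(DummySpec(), {
    "in_file": dict(argstr="-inputfile %s", position=1),
    "tracker": dict(argstr="-tracker %s", usedefault=True),
})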

Example 3

hexsha: 0e167608dba640e8e33ffb8e133f56b11ba0dc0a
size: 10,649
ext: py
lang: Python
max_stars_repo_path: apps/project/business/board.py
max_stars_repo_name: rainydaygit/testtcloudserver
max_stars_repo_head_hexsha: 8037603efe4502726a4d794fb1fc0a3f3cc80137
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 349
max_stars_repo_stars_event_min_datetime: 2020-08-04T10:21:01.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-23T08:31:29.000Z
max_issues_repo_path: apps/project/business/board.py
max_issues_repo_name: rainydaygit/testtcloudserver
max_issues_repo_head_hexsha: 8037603efe4502726a4d794fb1fc0a3f3cc80137
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 2
max_issues_repo_issues_event_min_datetime: 2021-01-07T06:17:05.000Z
max_issues_repo_issues_event_max_datetime: 2021-04-01T06:01:30.000Z
max_forks_repo_path: apps/project/business/board.py
max_forks_repo_name: rainydaygit/testtcloudserver
max_forks_repo_head_hexsha: 8037603efe4502726a4d794fb1fc0a3f3cc80137
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 70
max_forks_repo_forks_event_min_datetime: 2020-08-24T06:46:14.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-25T13:23:27.000Z

content:
import json

import requests
from flask import request, g, current_app
from sqlalchemy import desc, func
from sqlalchemy.orm import aliased

from apps.auth.models.users import User
from apps.project.models.issue import Issue
from apps.project.models.modules import Module
from apps.project.models.tasks import Task, TaskCase
from apps.project.models.version import Version
from apps.public.models.public import Config
from library.api.transfer import transfer2json


class BoardBusiness(object):

    @classmethod
    @transfer2json(
        '?id|!name|!description|!tmethod|!ttype|!status|!start_time|!end_time|!priority|!version_id|!version_name'
        '|!creator_id|!creator_name|!executor_id|!executor_name|!project_id',
        ispagination=True)
    def task_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
        # 0: created, 1: task deleted, 2: task completed
        user_creator = aliased(User)
        user_executor = aliased(User)
        ret = Task.query.outerjoin(
            user_creator, user_creator.id == Task.creator).outerjoin(
            user_executor, user_executor.id == Task.executor).outerjoin(
            Version, Version.id == Task.version).add_columns(
            Task.id.label('id'),
            Task.name.label('name'),
            Task.description.label('description'),
            Task.tmethod.label('tmethod'),
            Task.ttype.label('ttype'),
            Task.status.label('status'),
            func.date_format(Task.start_time, "%Y-%m-%d").label('start_time'),
            func.date_format(Task.end_time, "%Y-%m-%d").label('end_time'),
            Task.priority.label('priority'),
            Task.project_id.label('project_id'),
            Version.id.label('version_id'),
            Version.title.label('version_name'),
            user_creator.id.label('creator_id'),
            user_creator.nickname.label('creator_name'),
            user_executor.id.label('executor_id'),
            user_executor.nickname.label('executor_name'),
        )
        if projectid:
            ret = ret.filter(Task.project_id == projectid)
        if iscreator:
            ret = ret.filter(Task.creator == userid)
        else:
            ret = ret.filter(Task.executor == userid)
        if title not in ["", None]:
            ret = ret.filter(Task.name.like(f'%{title}%'))
        ret = ret.filter(Task.status.in_(status))
        result = ret.order_by(desc(Task.id)
                              ).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
        count = ret.count()
        return result, count

    @classmethod
    @transfer2json(
        '?taskcaseid|!task_id|!executor_id|!executor_name|!handler_id|!handler_name|!exe_way|!cnumber|!ctype|!title|'
        '!description|!precondition|!step_result|!is_auto|!status|!comment|!module_id|!module_name|!project_id',
        ispagination=True
    )
    def task_case_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
        # 0: case created, 1: case deleted, 2: skipped, 3: case passed, 4: case failed
        user_executor = aliased(User)
        user_handler = aliased(User)
        ret = TaskCase.query.outerjoin(
            Module, TaskCase.module_id == Module.id).outerjoin(
            user_executor, user_executor.id == TaskCase.executor).outerjoin(
            user_handler, user_handler.id == TaskCase.handler).add_columns(
            TaskCase.id.label('taskcaseid'),
            TaskCase.task_id.label('task_id'),
            TaskCase.exe_way.label('exe_way'),
            TaskCase.cnumber.label('cnumber'),
            TaskCase.ctype.label('ctype'),
            TaskCase.title.label('title'),
            TaskCase.description.label('description'),
            TaskCase.precondition.label('precondition'),
            TaskCase.step_result.label('step_result'),
            TaskCase.is_auto.label('is_auto'),
            TaskCase.status.label('status'),
            TaskCase.comment.label('comment'),
            TaskCase.project_id.label('project_id'),
            Module.id.label('module_id'),
            Module.name.label('module_name'),
            user_executor.id.label('executor_id'),
            user_executor.nickname.label('executor_name'),
            user_handler.id.label('handler_id'),
            user_handler.nickname.label('handler_name'),
        )
        if projectid:
            ret = ret.filter(TaskCase.project_id == projectid)
        if iscreator is 1:
            ret = ret.filter(TaskCase.handler == userid)
        else:
            ret = ret.filter(TaskCase.executor == userid)
        if title not in ["", None]:
            ret = ret.filter(TaskCase.title.like(f'%{title}%'))
        ret = ret.filter(TaskCase.status.in_(status))
        result = ret.order_by(desc(TaskCase.id)
                              ).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
        count = ret.count()
        return result, count

    @classmethod
    @transfer2json('?id|!issue_number|!title|!handle_status|!description|!chance|!level|!priority|!stage'
                   '|!version_id|!version_name|!creator_id|!creator_name|!handler_id|!handler_name|!project_id',
                   ispagination=True
                   )
    def issue_query(cls, projectid, userid, status, iscreator, page_size, page_index, title):
        # handle_status: {"1": "to do", "2": "in progress", "3": "in testing", "4": "closed", "5": "rejected", "6": "deferred"}
        user_creator = aliased(User)
        user_handler = aliased(User)
        ret = Issue.query.outerjoin(
            user_creator, user_creator.id == Issue.creator).outerjoin(
            user_handler, user_handler.id == Issue.handler).outerjoin(
            Version, Version.id == Issue.version).add_columns(
            Issue.id.label('id'),
            Issue.issue_number.label('issue_number'),
            Issue.title.label('title'),
            Issue.handle_status.label('handle_status'),
            Issue.description.label('description'),
            Issue.chance.label('chance'),
            Issue.level.label('level'),
            Issue.priority.label('priority'),
            Issue.stage.label('stage'),
            Issue.project_id.label('project_id'),
            Version.id.label('version_id'),
            Version.title.label('version_name'),
            user_creator.id.label('creator_id'),
            user_creator.nickname.label('creator_name'),
            user_handler.id.label('handler_id'),
            user_handler.nickname.label('handler_name'),
        )
        if projectid:
            ret = ret.filter(Issue.project_id == projectid)
        if iscreator:
            ret = ret.filter(Issue.creator == userid)
        else:
            ret = ret.filter(Issue.handler == userid)
        if title not in ["", None]:
            ret = ret.filter(Issue.title.like(f'%{title}%'))
        ret = ret.filter(Issue.handle_status.in_(status), Issue.status == Issue.ACTIVE)
        result = ret.order_by(desc(Issue.id)
                              ).limit(int(page_size)).offset(int(page_index - 1) * int(page_size)).all()
        count = ret.count()
        return result, count

    @classmethod
    def board_config(cls):
        user_id = g.userid if g.userid else None
        board_config = Config.query.add_columns(Config.content.label('content')).filter(Config.module == 'board',
                                                                                         Config.module_type == 1).first()
        board_config = json.loads(board_config.content)
        current_app.logger.info('board_config:' + str(board_config))
        return user_id, board_config

    @classmethod
    def user_create(cls, page_size, page_index, r_type, title):
        project_id = request.args.get('projectid')
        user_id, board_config = cls.board_config()
        ret = None
        count = 0
        if r_type == "task":
            ret, count = cls.task_query(project_id, user_id, board_config['create']['task'], 1, page_size,
                                        page_index, title)
            # task_case_ret = cls.task_case_query(projectid, user_id, board_config['create']['task_case'], 1)
        if r_type == "issue":
            ret, count = cls.issue_query(project_id, user_id, board_config['create']['issue'], 1, page_size,
                                         page_index, title)
        return ret, count

    @classmethod
    def user_unfinish(cls, page_size, page_index, r_type, title):
        project_id = request.args.get('projectid')
        user_id, board_config = cls.board_config()
        ret = None
        count = 0
        if r_type == "task":
            ret, count = cls.task_query(project_id, user_id, board_config['unfinish']['task'], 0, page_size,
                                        page_index, title)
        if r_type == "task_case":
            ret, count = cls.task_case_query(project_id, user_id, board_config['unfinish']['task_case'], 1, page_size,
                                             page_index, title)
        if r_type == "issue":
            ret, count = cls.issue_query(project_id, user_id, board_config['unfinish']['issue'], 0, page_size,
                                         page_index, title)
        return ret, count

    @classmethod
    def user_finish(cls, page_size, page_index, r_type, title):
        project_id = request.args.get('projectid')
        user_id, board_config = cls.board_config()
        ret = None
        count = 0
        if r_type == "task":
            ret, count = cls.task_query(project_id, user_id, board_config['finish']['task'], 0, page_size,
                                        page_index, title)
        if r_type == "task_case":
            ret, count = cls.task_case_query(project_id, user_id, board_config['finish']['task_case'], 1, page_size,
                                             page_index, title)
        if r_type == "issue":
            ret, count = cls.issue_query(project_id, user_id, board_config['finish']['issue'], 0, page_size,
                                         page_index, title)
        return ret, count

    @classmethod
    def stf_devices(cls):
        stf_devices = Config.query.add_columns(Config.content.label('content')).filter(
            Config.module == 'stf', Config.module_type == 1).first()
        stf_devices = json.loads(stf_devices.content)
        current_app.logger.info(json.dumps(stf_devices, ensure_ascii=False))
        url = stf_devices['URL']
        headers = stf_devices['headers']
        ret = requests.get(url, headers=headers)
        ret = json.loads(ret.content)
        # logger.info(json.dumps(ret, ensure_ascii=False))
        return ret

avg_line_length: 46.70614
max_line_length: 120
alphanum_fraction: 0.594892
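task_query, task_case_query and issue_query in the sample above all share one pagination shape: build a filtered query, order by descending id, page it with limit/offset derived from page_size and page_index, and call count() on the unpaginated query. A minimal self-contained sketch of that pattern, assuming SQLAlchemy 1.4 or newer with an in-memory SQLite database and a stand-in Task model (not the project's real model):

from sqlalchemy import Column, Integer, String, create_engine, desc
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Task(Base):  # stand-in model, not the project's real Task
    __tablename__ = "task"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    status = Column(Integer)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Task(name=f"task {i}", status=i % 3) for i in range(25)])
session.commit()

def task_page(status, page_size, page_index):
    query = session.query(Task).filter(Task.status.in_(status))
    rows = (query.order_by(desc(Task.id))
                 .limit(int(page_size))
                 .offset(int(page_index - 1) * int(page_size))
                 .all())
    return rows, query.count()  # count() runs against the unpaginated query

rows, total = task_page(status=[0, 1], page_size=5, page_index=2)
print(total, [t.name for t in rows])

Counting before applying limit/offset is what lets the caller report the total number of matching rows alongside the current page.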

Example 4

hexsha: 83d30c47ebde1323539e62d03f67b271652cf3be
size: 4,899
ext: py
lang: Python
max_stars_repo_path: integration_testing/run_travis_tests.py
max_stars_repo_name: Glitchfix/mindsdb
max_stars_repo_head_hexsha: e6c33d7085898c223030334962596ae8afa3fbd5
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: integration_testing/run_travis_tests.py
max_issues_repo_name: Glitchfix/mindsdb
max_issues_repo_head_hexsha: e6c33d7085898c223030334962596ae8afa3fbd5
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: integration_testing/run_travis_tests.py
max_forks_repo_name: Glitchfix/mindsdb
max_forks_repo_head_hexsha: e6c33d7085898c223030334962596ae8afa3fbd5
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
from data_generators import *
import traceback
import sys
import os
import itertools
import logging
from colorlog import ColoredFormatter
import time

import mindsdb
from mindsdb import CONST

#@TODO: Currently we use this isntead of randomly generated data since randomly generated data is not reliable enough
# We tell mindsDB what we want to learn and from what data
mdb = mindsdb.Predictor(name='home_rentals_price')

mdb.learn(
    to_predict='rental_price', # the column we want to learn to predict given all the data in the file
    from_data="https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv" # the path to the file where we can learn from, (note: can be url)
)

prediction = mdb.predict(when={'sqft':300})

print(prediction[0])

amd = mdb.get_model_data('home_rentals_price')

'''
types_that_work = ['int','float','date','datetime','timestamp','ascii']

logger = None

def setup_testing_logger():
    global logger
    formatter = ColoredFormatter(
        "%(log_color)s%(message)s",
        datefmt=None,
        reset=True,
        log_colors={
            'DEBUG': 'black,bg_white',
            'INFO': 'blue,bg_white',
            'WARNING': 'orange,bg_white',
            'ERROR': 'red,bg_white',
            'CRITICAL': 'red,bg_white',
        }
    )
    logger = logging.getLogger('mindsdb_integration_testing')
    logger.handlers = []
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

def run_tests():
    logger.info('Starting one-label test')
    separator = ','
    train_file_name = 'train_data.csv'
    test_file_name = 'test_data.csv'
    data_len = 8000

    # Create the full dataset
    logger.debug(f'Creating one-labe test datasets and saving them to {train_file_name} and {test_file_name}, total dataset size will be {data_len} rows')
    try:
        features = generate_value_cols(types_that_work,data_len, separator)
        labels = [generate_labels_2(features, separator)]

        feature_headers = list(map(lambda col: col[0], features))
        label_headers = list(map(lambda col: col[0], labels))

        # Create the training dataset and save it to a file
        columns_train = list(map(lambda col: col[1:int(len(col)*3/4)], features))
        columns_train.extend(list(map(lambda col: col[1:int(len(col)*3/4)], labels)))
        columns_to_file(columns_train, train_file_name, separator, headers=[*feature_headers,*label_headers])

        # Create the testing dataset and save it to a file
        columns_test = list(map(lambda col: col[int(len(col)*3/4):], features))
        columns_to_file(columns_test, test_file_name, separator, headers=feature_headers)
        logger.debug(f'Datasets generate and saved to files successfully')
    except:
        print(traceback.format_exc())
        logger.error(f'Failed to generate datasets !')
        exit(1)

    # Train
    mdb = None
    try:
        mdb = mindsdb.Predictor(name='test_one_label_prediction')
        logger.debug(f'Succesfully create mindsdb Predictor')
    except:
        logger.error(f'Failed to create mindsdb Predictor')
        exit(1)

    try:
        mdb.learn(from_data=train_file_name, to_predict=label_headers)
        logger.info(f'--------------- Learning ran succesfully ---------------')
        mdb.learn(from_data=train_file_name, to_predict=label_headers, rebuild_model=False)
        logger.info(f'--------------- Additional learning ran succesfully ---------------')
    except:
        print(traceback.format_exc())
        logger.error(f'Failed during the training !')
        exit(1)

    # Predict
    try:
        mdb = mindsdb.Predictor(name='test_one_label_prediction')
        logger.debug(f'Succesfully create mindsdb Predictor')
    except:
        print(traceback.format_exc())
        logger.error(f'Failed to create mindsdb Predictor')
        exit(1)

    try:
        results = mdb.predict(when_data=test_file_name)
        for row in results:
            expect_columns = [label_headers[0]
                              ,label_headers[0] + '_confidence']
            for col in expect_columns:
                if col not in row:
                    logger.error(f'Prediction failed to return expected column: {col}')
                    logger.debug('Got row: {}'.format(row))
                    exit(1)

        logger.info(f'--------------- Predicting ran succesfully ---------------')

        # Print statements are in for debugging, remove later, but keep the funcion calls to make sure the interface is working
        models = mdb.get_models()
        amd = mdb.get_model_data('test_one_label_prediction')
        print(amd)
    except:
        print(traceback.format_exc())
        logger.error(f'Failed whilst predicting')
        exit(1)

    logger.info('Travis CLI Tests ran succesfully !')

setup_testing_logger()
run_tests()
'''

avg_line_length: 33.554795
max_line_length: 154
alphanum_fraction: 0.654419

Example 5

hexsha: 7a220662c532f6177643a5bc0c91a4955cdccfc8
size: 42,389
ext: py
lang: Python
max_stars_repo_path: template_container_human/labels/slice_43.py
max_stars_repo_name: lkondratova/Brainplot
max_stars_repo_head_hexsha: 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: template_container_human/labels/slice_43.py
max_issues_repo_name: lkondratova/Brainplot
max_issues_repo_head_hexsha: 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: template_container_human/labels/slice_43.py
max_forks_repo_name: lkondratova/Brainplot
max_forks_repo_head_hexsha: 3c8a88c1995dedeaa5cbd88ee71499c7cf9c571d
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null

content:
coordinates_E0E1E1 = ((124, 121), (124, 122), (125, 119), (125, 124), (126, 97), (126, 118), (126, 121), (126, 122), (126, 125), (126, 144), (127, 84), (127, 97), (127, 98), (127, 117), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 126), (127, 142), (127, 143), (128, 84), (128, 86), (128, 96), (128, 99), (128, 110), (128, 112), (128, 113), (128, 114), (128, 115), (128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 127), (128, 133), (128, 141), (128, 142), (129, 85), (129, 96), (129, 99), (129, 109), (129, 117), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 128), (129, 131), (129, 133), (129, 141), (130, 87), (130, 95), (130, 97), (130, 98), (130, 100), (130, 109), (130, 111), (130, 112), (130, 113), (130, 114), (130, 115), (130, 116), (130, 117), (130, 118), (130, 119), (130, 120), (130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 126), (130, 127), (130, 130), (130, 133), (130, 140), (130, 141), (131, 86), (131, 94), (131, 96), (131, 97), (131, 98), (131, 100), (131, 109), (131, 111), (131, 112), (131, 113), (131, 114), (131, 115), (131, 116), (131, 117), (131, 118), (131, 119), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 125), (131, 126), (131, 127), (131, 128), (131, 131), (131, 132), (131, 134), (131, 139), (131, 141), (132, 87), (132, 89), (132, 90), (132, 91), (132, 92), (132, 95), (132, 96), (132, 97), (132, 98), (132, 99), (132, 101), (132, 109), (132, 111), (132, 112), (132, 113), (132, 114), (132, 115), (132, 116), (132, 117), (132, 118), (132, 119), (132, 120), (132, 121), (132, 122), (132, 123), (132, 124), (132, 125), (132, 126), (132, 127), (132, 128), (132, 129), (132, 130), (132, 131), (132, 132), (132, 133), (132, 135), (132, 136), (132, 137), (132, 141), (133, 87), (133, 94), (133, 95), (133, 96), (133, 97), (133, 98), (133, 99), (133, 100), (133, 102), (133, 109), (133, 111), (133, 112), (133, 113), (133, 114), (133, 115), (133, 116), (133, 117), (133, 118), (133, 119), (133, 120), (133, 121), (133, 122), (133, 123), (133, 124), (133, 125), (133, 126), (133, 127), (133, 128), (133, 129), (133, 130), (133, 131), (133, 132), (133, 133), (133, 134), (133, 139), (133, 141), (134, 87), (134, 89), (134, 90), (134, 91), (134, 92), (134, 93), (134, 94), (134, 95), (134, 96), (134, 97), (134, 98), (134, 99), (134, 100), (134, 101), (134, 103), (134, 108), (134, 110), (134, 111), (134, 112), (134, 113), (134, 114), (134, 115), (134, 116), (134, 117), (134, 118), (134, 119), (134, 120), (134, 121), (134, 122), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 130), (134, 131), (134, 132), (134, 133), (134, 134), (134, 135), (134, 136), (134, 137), (134, 138), (134, 139), (134, 141), (135, 87), (135, 89), (135, 90), (135, 91), (135, 92), (135, 93), (135, 94), (135, 95), (135, 96), (135, 97), (135, 98), (135, 99), (135, 100), (135, 101), (135, 102), (135, 104), (135, 107), (135, 109), (135, 110), (135, 111), (135, 112), (135, 113), (135, 114), (135, 115), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126), (135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 132), (135, 133), (135, 134), (135, 135), (135, 136), (135, 137), (135, 138), (135, 139), (135, 140), (135, 142), (136, 87), (136, 89), (136, 90), (136, 91), (136, 92), (136, 93), (136, 94), (136, 95), (136, 96), (136, 97), (136, 98), (136, 99), (136, 
100), (136, 101), (136, 102), (136, 103), (136, 108), (136, 109), (136, 110), (136, 111), (136, 112), (136, 113), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 132), (136, 133), (136, 134), (136, 135), (136, 136), (136, 137), (136, 138), (136, 139), (136, 140), (136, 141), (136, 145), (137, 80), (137, 82), (137, 83), (137, 84), (137, 85), (137, 88), (137, 89), (137, 90), (137, 91), (137, 92), (137, 93), (137, 94), (137, 95), (137, 96), (137, 97), (137, 98), (137, 99), (137, 100), (137, 101), (137, 102), (137, 103), (137, 104), (137, 105), (137, 106), (137, 107), (137, 108), (137, 109), (137, 110), (137, 111), (137, 112), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 126), (137, 127), (137, 128), (137, 129), (137, 130), (137, 131), (137, 132), (137, 133), (137, 134), (137, 135), (137, 136), (137, 137), (137, 138), (137, 139), (137, 140), (137, 141), (137, 144), (138, 79), (138, 81), (138, 82), (138, 83), (138, 84), (138, 85), (138, 86), (138, 87), (138, 89), (138, 90), (138, 91), (138, 92), (138, 93), (138, 101), (138, 102), (138, 103), (138, 104), (138, 105), (138, 106), (138, 107), (138, 108), (138, 109), (138, 110), (138, 111), (138, 112), (138, 113), (138, 114), (138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 120), (138, 121), (138, 122), (138, 123), (138, 124), (138, 125), (138, 126), (138, 127), (138, 128), (138, 129), (138, 130), (138, 131), (138, 132), (138, 133), (138, 134), (138, 135), (138, 136), (138, 137), (138, 138), (138, 139), (138, 143), (139, 89), (139, 90), (139, 91), (139, 94), (139, 95), (139, 96), (139, 97), (139, 98), (139, 99), (139, 100), (139, 102), (139, 103), (139, 104), (139, 105), (139, 106), (139, 107), (139, 108), (139, 109), (139, 110), (139, 111), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 119), (139, 120), (139, 121), (139, 122), (139, 123), (139, 124), (139, 125), (139, 126), (139, 127), (139, 128), (139, 129), (139, 130), (139, 131), (139, 132), (139, 133), (139, 134), (139, 135), (139, 136), (139, 137), (139, 138), (139, 141), (140, 89), (140, 92), (140, 101), (140, 103), (140, 104), (140, 105), (140, 106), (140, 107), (140, 108), (140, 109), (140, 110), (140, 111), (140, 112), (140, 113), (140, 114), (140, 115), (140, 116), (140, 117), (140, 118), (140, 119), (140, 120), (140, 121), (140, 122), (140, 123), (140, 124), (140, 125), (140, 126), (140, 127), (140, 128), (140, 129), (140, 130), (140, 131), (140, 132), (140, 133), (140, 134), (140, 135), (140, 136), (140, 139), (141, 89), (141, 91), (141, 102), (141, 104), (141, 105), (141, 106), (141, 107), (141, 108), (141, 109), (141, 110), (141, 111), (141, 112), (141, 113), (141, 114), (141, 115), (141, 116), (141, 117), (141, 118), (141, 119), (141, 120), (141, 121), (141, 122), (141, 123), (141, 124), (141, 125), (141, 126), (141, 127), (141, 128), (141, 129), (141, 130), (141, 131), (141, 132), (141, 133), (141, 134), (141, 135), (141, 138), (142, 89), (142, 91), (142, 102), (142, 104), (142, 105), (142, 106), (142, 107), (142, 108), (142, 109), (142, 110), (142, 111), (142, 112), (142, 113), (142, 114), (142, 115), (142, 116), (142, 117), (142, 118), (142, 119), (142, 120), (142, 121), (142, 122), (142, 123), (142, 124), (142, 125), (142, 126), (142, 
127), (142, 128), (142, 129), (142, 130), (142, 131), (142, 132), (142, 133), (142, 134), (142, 136), (143, 89), (143, 102), (143, 104), (143, 105), (143, 106), (143, 107), (143, 108), (143, 109), (143, 110), (143, 111), (143, 112), (143, 113), (143, 114), (143, 115), (143, 116), (143, 117), (143, 118), (143, 119), (143, 120), (143, 121), (143, 122), (143, 123), (143, 124), (143, 125), (143, 126), (143, 127), (143, 128), (143, 129), (143, 130), (143, 131), (143, 132), (143, 133), (143, 135), (144, 88), (144, 89), (144, 102), (144, 104), (144, 105), (144, 106), (144, 107), (144, 108), (144, 109), (144, 110), (144, 111), (144, 112), (144, 113), (144, 114), (144, 115), (144, 116), (144, 117), (144, 118), (144, 119), (144, 120), (144, 121), (144, 122), (144, 123), (144, 124), (144, 125), (144, 126), (144, 127), (144, 128), (144, 129), (144, 130), (144, 134), (145, 88), (145, 102), (145, 104), (145, 105), (145, 106), (145, 107), (145, 108), (145, 109), (145, 110), (145, 111), (145, 112), (145, 113), (145, 114), (145, 115), (145, 116), (145, 117), (145, 118), (145, 119), (145, 120), (145, 121), (145, 122), (145, 123), (145, 124), (145, 125), (145, 126), (145, 127), (145, 128), (145, 131), (145, 134), (146, 87), (146, 101), (146, 103), (146, 104), (146, 105), (146, 106), (146, 107), (146, 108), (146, 109), (146, 110), (146, 111), (146, 112), (146, 113), (146, 114), (146, 115), (146, 116), (146, 117), (146, 118), (146, 119), (146, 120), (146, 121), (146, 122), (146, 123), (146, 124), (146, 125), (146, 129), (146, 130), (146, 134), (147, 86), (147, 87), (147, 101), (147, 103), (147, 104), (147, 105), (147, 106), (147, 107), (147, 108), (147, 109), (147, 110), (147, 111), (147, 112), (147, 113), (147, 114), (147, 115), (147, 116), (147, 117), (147, 118), (147, 119), (147, 120), (147, 121), (147, 122), (147, 123), (147, 124), (147, 125), (147, 127), (147, 134), (148, 85), (148, 100), (148, 102), (148, 103), (148, 104), (148, 105), (148, 106), (148, 107), (148, 108), (148, 109), (148, 110), (148, 111), (148, 112), (148, 113), (148, 114), (148, 115), (148, 116), (148, 117), (148, 118), (148, 119), (148, 120), (148, 121), (148, 122), (148, 123), (148, 125), (148, 135), (149, 99), (149, 101), (149, 102), (149, 103), (149, 105), (149, 106), (149, 107), (149, 108), (149, 109), (149, 110), (149, 111), (149, 112), (149, 113), (149, 114), (149, 115), (149, 116), (149, 117), (149, 118), (149, 119), (149, 120), (149, 121), (149, 122), (149, 125), (149, 136), (149, 139), (150, 99), (150, 101), (150, 102), (150, 104), (150, 106), (150, 107), (150, 108), (150, 109), (150, 110), (150, 111), (150, 112), (150, 113), (150, 114), (150, 115), (150, 116), (150, 117), (150, 118), (150, 119), (150, 120), (150, 121), (150, 124), (150, 139), (151, 98), (151, 100), (151, 101), (151, 103), (151, 107), (151, 108), (151, 109), (151, 110), (151, 111), (151, 112), (151, 115), (151, 116), (151, 117), (151, 118), (151, 119), (151, 120), (151, 121), (151, 122), (151, 124), (151, 137), (151, 139), (152, 98), (152, 99), (152, 100), (152, 102), (152, 106), (152, 108), (152, 109), (152, 110), (152, 111), (152, 113), (152, 114), (152, 116), (152, 117), (152, 118), (152, 119), (152, 121), (152, 138), (153, 97), (153, 99), (153, 101), (153, 107), (153, 109), (153, 110), (153, 112), (153, 115), (153, 117), (153, 118), (153, 120), (154, 96), (154, 98), (154, 100), (154, 107), (154, 109), (154, 111), (154, 116), (154, 118), (154, 120), (155, 95), (155, 97), (155, 99), (155, 107), (155, 110), (155, 117), (155, 120), (156, 94), (156, 97), (156, 
99), (156, 107), (156, 109), (156, 117), (156, 120), (157, 93), (157, 95), (157, 98), (157, 107), (157, 118), (157, 121), (158, 92), (158, 94), (158, 97), (158, 98), (158, 107), (158, 108), (158, 118), (158, 121), (159, 107), (159, 118), (159, 121), (160, 106), (160, 107), (160, 119), (160, 121), (160, 132), (161, 106), (161, 119), (161, 120), (161, 132), (162, 106), (162, 119), (162, 120), (162, 132), (163, 106), (163, 119), (163, 132), (164, 106), (164, 118), (164, 119), (164, 132), (165, 106), (165, 118), (165, 119), (165, 132), (166, 118), (166, 119), (167, 118), (167, 119), (168, 117), (168, 119), (169, 117), (169, 119), (170, 117), (170, 119), ) coordinates_E1E1E1 = ((76, 121), (76, 122), (77, 120), (77, 122), (78, 120), (78, 122), (79, 120), (79, 122), (80, 105), (80, 120), (80, 123), (81, 105), (81, 119), (81, 121), (81, 122), (81, 123), (81, 127), (81, 129), (82, 105), (82, 119), (82, 121), (82, 122), (82, 123), (82, 125), (82, 126), (82, 128), (83, 105), (83, 119), (83, 121), (83, 122), (83, 123), (83, 124), (83, 127), (84, 105), (84, 119), (84, 121), (84, 122), (84, 123), (84, 124), (84, 126), (85, 104), (85, 106), (85, 119), (85, 121), (85, 122), (85, 123), (85, 124), (85, 126), (86, 103), (86, 106), (86, 118), (86, 120), (86, 121), (86, 122), (86, 123), (86, 125), (87, 102), (87, 106), (87, 118), (87, 120), (87, 121), (87, 122), (87, 123), (87, 125), (88, 92), (88, 101), (88, 104), (88, 105), (88, 107), (88, 118), (88, 120), (88, 121), (88, 122), (88, 124), (88, 136), (89, 91), (89, 100), (89, 103), (89, 104), (89, 105), (89, 107), (89, 117), (89, 119), (89, 120), (89, 121), (89, 122), (89, 124), (89, 135), (89, 136), (90, 91), (90, 93), (90, 99), (90, 101), (90, 104), (90, 105), (90, 106), (90, 108), (90, 117), (90, 119), (90, 120), (90, 121), (90, 123), (90, 134), (90, 136), (91, 90), (91, 92), (91, 94), (91, 98), (91, 99), (91, 102), (91, 104), (91, 105), (91, 106), (91, 107), (91, 109), (91, 116), (91, 118), (91, 119), (91, 120), (91, 122), (91, 133), (91, 135), (91, 137), (92, 93), (92, 96), (92, 98), (92, 105), (92, 106), (92, 107), (92, 108), (92, 110), (92, 111), (92, 112), (92, 113), (92, 114), (92, 117), (92, 118), (92, 119), (92, 121), (92, 132), (92, 134), (92, 135), (92, 137), (93, 94), (93, 97), (93, 104), (93, 106), (93, 107), (93, 108), (93, 109), (93, 112), (93, 113), (93, 116), (93, 117), (93, 118), (93, 119), (93, 121), (93, 132), (93, 134), (93, 135), (93, 136), (93, 138), (94, 105), (94, 107), (94, 108), (94, 109), (94, 110), (94, 111), (94, 112), (94, 113), (94, 114), (94, 115), (94, 116), (94, 117), (94, 118), (94, 119), (94, 121), (94, 132), (94, 134), (94, 135), (94, 136), (94, 137), (95, 106), (95, 108), (95, 109), (95, 110), (95, 111), (95, 112), (95, 113), (95, 114), (95, 115), (95, 116), (95, 117), (95, 118), (95, 119), (95, 121), (95, 132), (95, 134), (95, 135), (95, 136), (95, 139), (96, 106), (96, 108), (96, 109), (96, 110), (96, 111), (96, 112), (96, 113), (96, 114), (96, 115), (96, 116), (96, 117), (96, 118), (96, 119), (96, 120), (96, 122), (96, 131), (96, 133), (96, 134), (96, 135), (96, 137), (97, 106), (97, 108), (97, 109), (97, 110), (97, 111), (97, 112), (97, 113), (97, 114), (97, 115), (97, 116), (97, 117), (97, 118), (97, 119), (97, 120), (97, 122), (97, 130), (97, 132), (97, 133), (97, 134), (97, 136), (98, 105), (98, 107), (98, 108), (98, 109), (98, 110), (98, 111), (98, 112), (98, 113), (98, 114), (98, 115), (98, 116), (98, 117), (98, 118), (98, 119), (98, 120), (98, 121), (98, 122), (98, 124), (98, 125), (98, 126), (98, 127), (98, 
128), (98, 131), (98, 132), (98, 133), (98, 134), (98, 136), (99, 103), (99, 106), (99, 107), (99, 108), (99, 109), (99, 110), (99, 111), (99, 112), (99, 113), (99, 114), (99, 115), (99, 116), (99, 117), (99, 118), (99, 119), (99, 120), (99, 121), (99, 122), (99, 130), (99, 131), (99, 132), (99, 133), (99, 135), (100, 101), (100, 105), (100, 106), (100, 107), (100, 108), (100, 109), (100, 110), (100, 111), (100, 112), (100, 113), (100, 114), (100, 115), (100, 116), (100, 117), (100, 118), (100, 119), (100, 120), (100, 121), (100, 122), (100, 123), (100, 124), (100, 125), (100, 126), (100, 127), (100, 128), (100, 129), (100, 130), (100, 131), (100, 132), (100, 133), (100, 135), (101, 88), (101, 90), (101, 98), (101, 100), (101, 103), (101, 104), (101, 105), (101, 106), (101, 107), (101, 108), (101, 109), (101, 110), (101, 111), (101, 112), (101, 113), (101, 114), (101, 115), (101, 116), (101, 117), (101, 118), (101, 119), (101, 120), (101, 121), (101, 122), (101, 123), (101, 124), (101, 125), (101, 126), (101, 127), (101, 128), (101, 129), (101, 130), (101, 131), (101, 132), (101, 133), (101, 135), (102, 85), (102, 87), (102, 88), (102, 91), (102, 92), (102, 93), (102, 94), (102, 95), (102, 96), (102, 97), (102, 101), (102, 102), (102, 103), (102, 104), (102, 105), (102, 106), (102, 107), (102, 108), (102, 109), (102, 110), (102, 111), (102, 112), (102, 113), (102, 114), (102, 115), (102, 116), (102, 117), (102, 118), (102, 119), (102, 120), (102, 121), (102, 122), (102, 123), (102, 124), (102, 125), (102, 126), (102, 127), (102, 128), (102, 129), (102, 130), (102, 131), (102, 132), (102, 133), (102, 135), (103, 84), (103, 90), (103, 98), (103, 99), (103, 100), (103, 101), (103, 102), (103, 103), (103, 104), (103, 105), (103, 106), (103, 107), (103, 108), (103, 109), (103, 110), (103, 111), (103, 112), (103, 113), (103, 114), (103, 115), (103, 116), (103, 117), (103, 118), (103, 119), (103, 120), (103, 121), (103, 122), (103, 123), (103, 124), (103, 125), (103, 126), (103, 127), (103, 128), (103, 129), (103, 130), (103, 131), (103, 132), (103, 133), (103, 134), (103, 136), (104, 91), (104, 93), (104, 94), (104, 95), (104, 96), (104, 97), (104, 98), (104, 99), (104, 100), (104, 101), (104, 102), (104, 103), (104, 104), (104, 105), (104, 106), (104, 107), (104, 108), (104, 109), (104, 110), (104, 111), (104, 112), (104, 113), (104, 114), (104, 115), (104, 116), (104, 117), (104, 118), (104, 119), (104, 120), (104, 121), (104, 122), (104, 123), (104, 124), (104, 125), (104, 126), (104, 127), (104, 128), (104, 129), (104, 130), (104, 131), (104, 132), (104, 133), (104, 134), (104, 135), (104, 138), (104, 139), (104, 141), (105, 91), (105, 93), (105, 94), (105, 95), (105, 96), (105, 97), (105, 98), (105, 99), (105, 100), (105, 101), (105, 102), (105, 103), (105, 108), (105, 109), (105, 110), (105, 111), (105, 112), (105, 113), (105, 114), (105, 115), (105, 116), (105, 117), (105, 118), (105, 119), (105, 120), (105, 121), (105, 122), (105, 123), (105, 124), (105, 125), (105, 126), (105, 127), (105, 128), (105, 129), (105, 130), (105, 131), (105, 132), (105, 133), (105, 134), (105, 135), (105, 136), (105, 142), (106, 91), (106, 93), (106, 94), (106, 95), (106, 96), (106, 97), (106, 98), (106, 99), (106, 100), (106, 104), (106, 105), (106, 106), (106, 107), (106, 110), (106, 111), (106, 112), (106, 113), (106, 114), (106, 115), (106, 116), (106, 117), (106, 118), (106, 119), (106, 120), (106, 121), (106, 122), (106, 123), (106, 124), (106, 125), (106, 126), (106, 127), (106, 128), (106, 129), (106, 
130), (106, 131), (106, 132), (106, 133), (106, 134), (106, 135), (106, 136), (106, 137), (106, 138), (106, 139), (106, 140), (106, 142), (107, 91), (107, 93), (107, 94), (107, 95), (107, 96), (107, 97), (107, 98), (107, 101), (107, 102), (107, 103), (107, 108), (107, 111), (107, 112), (107, 113), (107, 114), (107, 115), (107, 116), (107, 117), (107, 118), (107, 119), (107, 120), (107, 121), (107, 122), (107, 123), (107, 124), (107, 125), (107, 126), (107, 127), (107, 128), (107, 129), (107, 130), (107, 131), (107, 132), (107, 133), (107, 134), (107, 135), (107, 136), (107, 137), (107, 138), (107, 139), (107, 141), (108, 91), (108, 93), (108, 94), (108, 95), (108, 96), (108, 97), (108, 100), (108, 110), (108, 112), (108, 113), (108, 114), (108, 115), (108, 116), (108, 117), (108, 118), (108, 119), (108, 120), (108, 121), (108, 122), (108, 123), (108, 124), (108, 125), (108, 126), (108, 127), (108, 128), (108, 129), (108, 130), (108, 131), (108, 132), (108, 133), (108, 134), (108, 135), (108, 136), (108, 137), (108, 138), (108, 140), (109, 90), (109, 92), (109, 93), (109, 94), (109, 95), (109, 96), (109, 98), (109, 111), (109, 113), (109, 114), (109, 115), (109, 116), (109, 117), (109, 118), (109, 119), (109, 120), (109, 121), (109, 122), (109, 123), (109, 124), (109, 125), (109, 126), (109, 127), (109, 128), (109, 129), (109, 130), (109, 131), (109, 132), (109, 133), (109, 134), (109, 135), (109, 136), (109, 137), (109, 138), (109, 140), (110, 90), (110, 92), (110, 93), (110, 94), (110, 95), (110, 97), (110, 111), (110, 113), (110, 114), (110, 115), (110, 116), (110, 117), (110, 118), (110, 119), (110, 120), (110, 121), (110, 122), (110, 123), (110, 124), (110, 125), (110, 126), (110, 127), (110, 128), (110, 129), (110, 130), (110, 131), (110, 132), (110, 134), (110, 135), (110, 136), (110, 137), (110, 138), (110, 140), (111, 89), (111, 91), (111, 92), (111, 93), (111, 94), (111, 96), (111, 111), (111, 113), (111, 114), (111, 115), (111, 116), (111, 117), (111, 118), (111, 119), (111, 120), (111, 121), (111, 122), (111, 123), (111, 124), (111, 125), (111, 126), (111, 127), (111, 128), (111, 129), (111, 130), (111, 133), (111, 136), (111, 137), (111, 138), (111, 140), (112, 88), (112, 90), (112, 94), (112, 96), (112, 111), (112, 113), (112, 114), (112, 115), (112, 116), (112, 117), (112, 118), (112, 119), (112, 120), (112, 121), (112, 122), (112, 123), (112, 124), (112, 125), (112, 126), (112, 127), (112, 128), (112, 129), (112, 131), (112, 134), (112, 137), (112, 140), (113, 77), (113, 82), (113, 83), (113, 84), (113, 85), (113, 86), (113, 87), (113, 89), (113, 91), (113, 92), (113, 96), (113, 111), (113, 113), (113, 114), (113, 115), (113, 117), (113, 118), (113, 119), (113, 120), (113, 121), (113, 122), (113, 123), (113, 124), (113, 125), (113, 126), (113, 127), (113, 128), (113, 130), (113, 136), (113, 138), (113, 140), (114, 77), (114, 79), (114, 80), (114, 83), (114, 90), (114, 94), (114, 95), (114, 111), (114, 116), (114, 119), (114, 120), (114, 121), (114, 122), (114, 123), (114, 124), (114, 125), (114, 126), (114, 127), (114, 129), (114, 137), (115, 77), (115, 84), (115, 89), (115, 95), (115, 111), (115, 113), (115, 114), (115, 118), (115, 120), (115, 121), (115, 122), (115, 123), (115, 129), (115, 137), (116, 77), (116, 85), (116, 88), (116, 111), (116, 119), (116, 121), (116, 122), (116, 125), (116, 126), (116, 128), (117, 119), (117, 121), (117, 123), (118, 120), (118, 123), (119, 120), (119, 122), (120, 121), ) coordinates_FEDAB9 = ((126, 79), (126, 80), (127, 78), (127, 82), 
(128, 77), (128, 79), (128, 80), (128, 82), (129, 76), (129, 78), (129, 79), (129, 80), (129, 81), (129, 83), (130, 75), (130, 77), (130, 78), (130, 79), (130, 80), (130, 81), (130, 83), (131, 75), (131, 77), (131, 78), (131, 79), (131, 80), (131, 81), (131, 82), (131, 84), (132, 76), (132, 78), (132, 79), (132, 80), (132, 81), (132, 82), (132, 84), (133, 76), (133, 78), (133, 79), (133, 80), (133, 81), (133, 82), (133, 83), (133, 85), (134, 76), (134, 78), (134, 85), (135, 76), (135, 80), (135, 81), (135, 82), (135, 83), (135, 85), (136, 75), (136, 78), (137, 75), (137, 77), (137, 78), (138, 74), (138, 77), (139, 74), (139, 77), (140, 74), (140, 76), (140, 77), (140, 78), (140, 79), (140, 80), (140, 81), (140, 82), (140, 83), (140, 84), (140, 85), (140, 87), (141, 75), (141, 82), (141, 83), (141, 84), (141, 85), (141, 87), (141, 94), (141, 96), (141, 97), (141, 98), (141, 100), (142, 77), (142, 78), (142, 80), (142, 93), ) coordinates_D970D6 = ((123, 83), (124, 79), (124, 81), (124, 85), (124, 86), (124, 87), (124, 88), (124, 89), (124, 90), (125, 81), (125, 83), (125, 84), (125, 90), (126, 86), (126, 88), (126, 90), (127, 88), (127, 90), (128, 88), (128, 90), (129, 89), (130, 89), ) coordinates_01CED1 = ((143, 82), (143, 84), (143, 86), (143, 97), (143, 98), (143, 100), (144, 86), (144, 91), (144, 93), (144, 94), (144, 95), (144, 96), (144, 100), (145, 80), (145, 82), (145, 83), (145, 85), (145, 90), (145, 97), (145, 99), (146, 79), (146, 82), (146, 84), (146, 90), (146, 92), (146, 93), (146, 94), (146, 95), (146, 96), (146, 97), (146, 99), (147, 78), (147, 80), (147, 81), (147, 83), (147, 89), (147, 91), (147, 92), (147, 93), (147, 94), (147, 95), (147, 96), (147, 98), (148, 78), (148, 80), (148, 81), (148, 83), (148, 88), (148, 90), (148, 91), (148, 92), (148, 93), (148, 94), (148, 95), (148, 96), (148, 98), (149, 79), (149, 81), (149, 83), (149, 86), (149, 87), (149, 89), (149, 90), (149, 91), (149, 92), (149, 93), (149, 94), (149, 95), (149, 97), (150, 80), (150, 82), (150, 83), (150, 84), (150, 85), (150, 92), (150, 93), (150, 94), (150, 96), (151, 81), (151, 89), (151, 92), (151, 94), (151, 96), (152, 82), (152, 84), (152, 85), (152, 87), (152, 92), (152, 95), (153, 91), (153, 94), (153, 103), (153, 105), (154, 91), (154, 93), (154, 105), (155, 90), (155, 92), (155, 102), (155, 105), (155, 112), (156, 89), (156, 91), (156, 101), (156, 103), (156, 105), (156, 113), (157, 88), (157, 90), (157, 101), (157, 103), (157, 105), (157, 111), (157, 113), (158, 87), (158, 90), (158, 100), (158, 102), (158, 103), (158, 104), (158, 105), (158, 110), (158, 113), (159, 87), (159, 90), (159, 99), (159, 101), (159, 102), (159, 104), (159, 110), (159, 113), (160, 87), (160, 88), (160, 89), (160, 90), (160, 91), (160, 92), (160, 93), (160, 94), (160, 95), (160, 96), (160, 97), (160, 100), (160, 101), (160, 102), (160, 104), (160, 109), (160, 111), (160, 113), (161, 87), (161, 89), (161, 90), (161, 100), (161, 101), (161, 102), (161, 104), (161, 109), (161, 110), (161, 111), (161, 113), (162, 90), (162, 91), (162, 92), (162, 93), (162, 94), (162, 95), (162, 96), (162, 97), (162, 98), (162, 99), (162, 100), (162, 101), (162, 102), (162, 104), (162, 108), (162, 110), (162, 111), (162, 113), (163, 88), (163, 96), (163, 100), (163, 101), (163, 102), (163, 104), (163, 108), (163, 110), (163, 111), (163, 113), (164, 90), (164, 92), (164, 93), (164, 94), (164, 95), (164, 96), (164, 97), (164, 98), (164, 101), (164, 102), (164, 104), (164, 108), (164, 110), (164, 111), (164, 113), (165, 100), (165, 102), 
(165, 104), (165, 108), (165, 110), (165, 111), (165, 113), (166, 101), (166, 104), (166, 105), (166, 108), (166, 110), (166, 111), (166, 113), (167, 101), (167, 103), (167, 105), (167, 109), (167, 111), (167, 113), (168, 101), (168, 103), (168, 104), (168, 106), (168, 109), (168, 111), (168, 113), (169, 102), (169, 104), (169, 105), (169, 108), (169, 109), (169, 110), (169, 112), (170, 103), (170, 106), (170, 109), (170, 110), (170, 112), (171, 104), (171, 107), (171, 108), (171, 109), (171, 111), (172, 106), (172, 110), (173, 107), (173, 109), ) coordinates_FE3E96 = ((121, 99), (121, 100), (121, 101), (121, 102), (122, 94), (122, 95), (122, 96), (122, 97), (122, 98), (122, 104), (122, 105), (122, 132), (122, 133), (122, 134), (122, 136), (123, 93), (123, 99), (123, 100), (123, 101), (123, 102), (123, 103), (123, 107), (123, 108), (123, 109), (123, 110), (123, 111), (123, 112), (123, 113), (123, 114), (123, 115), (123, 116), (123, 117), (123, 119), (123, 124), (123, 126), (123, 127), (123, 128), (123, 129), (123, 130), (123, 131), (123, 136), (124, 93), (124, 95), (124, 97), (124, 100), (124, 101), (124, 102), (124, 103), (124, 104), (124, 105), (124, 106), (124, 118), (124, 125), (124, 132), (124, 136), (125, 92), (125, 94), (125, 96), (125, 99), (125, 101), (125, 102), (125, 103), (125, 104), (125, 105), (125, 106), (125, 107), (125, 108), (125, 111), (125, 112), (125, 117), (125, 126), (125, 128), (125, 129), (125, 130), (125, 131), (125, 134), (125, 136), (126, 92), (126, 95), (126, 100), (126, 102), (126, 103), (126, 104), (126, 105), (126, 106), (126, 107), (126, 110), (126, 113), (126, 115), (126, 127), (126, 132), (126, 136), (127, 92), (127, 100), (127, 102), (127, 103), (127, 104), (127, 105), (127, 106), (127, 108), (127, 128), (127, 131), (127, 135), (127, 136), (128, 92), (128, 94), (128, 101), (128, 103), (128, 104), (128, 105), (128, 107), (128, 130), (128, 135), (128, 136), (129, 92), (129, 93), (129, 102), (129, 104), (129, 105), (129, 107), (129, 135), (129, 136), (130, 92), (130, 102), (130, 104), (130, 105), (130, 107), (130, 135), (131, 103), (131, 105), (131, 107), (132, 103), (132, 107), (133, 104), (133, 106), ) coordinates_AF3060 = ((123, 146), (123, 148), (123, 149), (123, 151), (124, 145), (124, 151), (125, 145), (125, 147), (125, 151), (126, 146), (126, 149), (127, 146), (128, 145), (128, 147), (129, 144), (129, 146), (130, 143), (130, 145), (131, 143), (131, 144), (132, 143), ) coordinates_ACFF2F = ((128, 149), (128, 152), (129, 148), (129, 152), (130, 147), (130, 149), (130, 150), (130, 152), (131, 146), (131, 149), (131, 150), (131, 152), (132, 148), (132, 149), (132, 150), (132, 152), (133, 147), (133, 148), (133, 149), (133, 150), (133, 152), (134, 143), (134, 148), (134, 149), (134, 150), (134, 152), (135, 148), (135, 149), (136, 147), (136, 149), (136, 151), (137, 147), (137, 150), (138, 146), (138, 149), (139, 145), (139, 148), (140, 143), (140, 146), (140, 148), (141, 141), (141, 147), (142, 139), (142, 143), (142, 144), (142, 145), (142, 147), (143, 138), (143, 141), (143, 142), (143, 147), (144, 139), ) coordinates_FFDAB9 = ((109, 74), (109, 76), (109, 86), (109, 88), (110, 74), (110, 78), (110, 83), (110, 84), (110, 85), (111, 73), (111, 75), (111, 77), (111, 80), (111, 81), (111, 82), (111, 83), (111, 84), (111, 85), (111, 87), (112, 73), (112, 75), (112, 79), (113, 72), (113, 75), (114, 72), (114, 75), (115, 72), (115, 75), (116, 72), (116, 75), (117, 72), (117, 75), (118, 73), (118, 74), ) coordinates_DA70D6 = ((116, 80), (116, 82), (116, 83), 
(116, 90), (117, 79), (117, 84), (117, 89), (117, 90), (118, 80), (118, 81), (118, 82), (118, 85), (118, 88), (118, 90), (119, 76), (119, 78), (119, 83), (119, 84), (119, 87), (119, 90), (120, 79), (120, 80), (120, 81), (120, 82), (120, 90), (121, 84), (121, 85), (121, 86), (121, 87), (121, 88), (121, 90), ) coordinates_00CED1 = ((74, 101), (74, 103), (74, 104), (74, 105), (75, 99), (75, 107), (75, 108), (76, 98), (76, 101), (76, 102), (76, 103), (76, 104), (76, 105), (76, 106), (76, 108), (77, 97), (77, 99), (77, 100), (77, 101), (77, 102), (77, 103), (77, 106), (77, 107), (77, 109), (78, 98), (78, 100), (78, 101), (78, 102), (78, 103), (78, 104), (78, 105), (78, 106), (78, 107), (78, 109), (79, 98), (79, 100), (79, 101), (79, 103), (79, 107), (79, 110), (80, 92), (80, 93), (80, 99), (80, 101), (80, 103), (80, 107), (80, 110), (81, 91), (81, 95), (81, 98), (81, 100), (81, 101), (81, 103), (81, 107), (81, 110), (82, 90), (82, 93), (82, 94), (82, 97), (82, 98), (82, 99), (82, 100), (82, 101), (82, 103), (82, 107), (82, 109), (82, 111), (83, 90), (83, 92), (83, 93), (83, 94), (83, 95), (83, 98), (83, 99), (83, 100), (83, 101), (83, 103), (83, 107), (83, 108), (83, 111), (84, 89), (84, 91), (84, 93), (84, 94), (84, 95), (84, 96), (84, 97), (84, 98), (84, 99), (84, 100), (84, 101), (84, 103), (84, 108), (84, 110), (84, 112), (85, 89), (85, 94), (85, 95), (85, 96), (85, 97), (85, 98), (85, 99), (85, 100), (85, 102), (85, 108), (85, 110), (85, 111), (85, 113), (86, 88), (86, 90), (86, 94), (86, 95), (86, 96), (86, 97), (86, 98), (86, 99), (86, 101), (86, 108), (86, 110), (86, 111), (86, 113), (87, 88), (87, 90), (87, 94), (87, 96), (87, 97), (87, 98), (87, 100), (87, 109), (87, 111), (87, 113), (88, 86), (88, 88), (88, 90), (88, 94), (88, 96), (88, 97), (88, 99), (88, 109), (88, 112), (89, 84), (89, 89), (89, 95), (89, 98), (89, 109), (89, 112), (90, 82), (90, 85), (90, 86), (90, 88), (90, 95), (90, 97), (90, 110), (90, 112), (91, 84), (91, 85), (91, 87), (92, 81), (92, 83), (92, 84), (92, 85), (92, 87), (93, 80), (93, 82), (93, 83), (93, 84), (93, 85), (93, 86), (93, 87), (93, 88), (93, 89), (93, 91), (93, 100), (93, 102), (94, 79), (94, 81), (94, 82), (94, 83), (94, 84), (94, 85), (94, 86), (94, 87), (94, 93), (94, 99), (94, 103), (95, 79), (95, 81), (95, 82), (95, 83), (95, 84), (95, 85), (95, 86), (95, 87), (95, 88), (95, 89), (95, 90), (95, 91), (95, 94), (95, 100), (95, 101), (95, 103), (96, 78), (96, 80), (96, 81), (96, 82), (96, 83), (96, 84), (96, 85), (96, 86), (96, 87), (96, 88), (96, 89), (96, 90), (96, 91), (96, 92), (96, 93), (96, 96), (96, 99), (96, 100), (96, 101), (96, 102), (96, 104), (97, 78), (97, 80), (97, 81), (97, 82), (97, 83), (97, 84), (97, 85), (97, 86), (97, 87), (97, 88), (97, 89), (97, 90), (97, 91), (97, 92), (97, 93), (97, 94), (97, 95), (97, 97), (97, 98), (97, 99), (97, 103), (98, 78), (98, 80), (98, 81), (98, 82), (98, 83), (98, 84), (98, 85), (98, 86), (98, 92), (98, 93), (98, 94), (98, 95), (98, 96), (98, 97), (98, 101), (98, 102), (99, 79), (99, 81), (99, 82), (99, 83), (99, 84), (99, 87), (99, 88), (99, 89), (99, 90), (99, 91), (99, 98), (99, 99), (100, 80), (100, 82), (100, 85), (100, 86), (100, 92), (100, 93), (100, 94), (100, 95), (100, 96), (101, 80), (101, 82), (101, 84), (102, 79), (102, 82), (103, 77), (103, 80), (103, 82), (104, 79), (104, 80), (104, 82), (104, 86), (104, 88), (105, 77), (105, 79), (105, 80), (105, 81), (105, 82), (105, 83), (105, 84), (105, 85), (105, 89), (106, 78), (107, 79), (107, 81), (107, 85), (107, 86), (107, 88), (108, 83), 
(108, 84), ) coordinates_A120F0 = ((122, 138), (122, 140), (122, 141), (122, 143), (123, 138), (123, 144), (124, 138), (124, 140), (124, 143), (125, 138), (126, 138), (126, 140), (127, 138), (127, 140), (128, 138), (128, 139), (129, 138), (130, 137), ) coordinates_ADFF2F = ((100, 137), (100, 139), (100, 140), (100, 141), (100, 142), (100, 143), (100, 144), (100, 146), (101, 137), (101, 146), (102, 138), (102, 140), (102, 141), (102, 144), (102, 146), (103, 143), (103, 146), (104, 144), (104, 146), (105, 144), (105, 147), (106, 144), (106, 147), (107, 144), (107, 146), (107, 148), (108, 143), (108, 145), (108, 146), (108, 147), (108, 149), (109, 142), (109, 144), (109, 145), (109, 146), (109, 147), (109, 149), (110, 143), (110, 146), (110, 147), (110, 149), (111, 144), (111, 147), (111, 149), (112, 146), (112, 149), (113, 147), (113, 149), ) coordinates_A020F0 = ((114, 142), (115, 140), (115, 143), (116, 141), (116, 144), (117, 141), (117, 144), (118, 141), (118, 143), (118, 145), (119, 142), (119, 146), (120, 142), (120, 146), (121, 145), (121, 146), ) coordinates_B03060 = ((111, 142), (112, 142), (112, 143), (113, 143), (113, 144), (114, 144), (114, 146), (115, 145), (115, 147), (115, 148), (116, 146), (116, 149), (116, 150), (116, 152), (117, 147), (117, 152), (118, 147), (118, 149), (118, 150), (118, 152), (119, 148), (119, 152), (120, 148), (120, 150), (120, 152), (121, 152), ) coordinates_ACD8E6 = ((79, 137), (80, 136), (80, 138), (81, 135), (81, 137), (81, 139), (82, 134), (82, 136), (82, 137), (83, 133), (83, 136), (83, 137), (83, 138), (83, 140), (84, 133), (84, 135), (84, 136), (84, 137), (84, 138), (84, 139), (84, 141), (85, 132), (85, 134), (85, 138), (85, 139), (85, 141), (86, 132), (86, 133), (86, 138), (86, 139), (86, 140), (86, 142), (87, 131), (87, 134), (87, 138), (87, 140), (87, 142), (88, 130), (88, 133), (88, 138), (88, 140), (88, 141), (88, 143), (89, 129), (89, 132), (89, 138), (89, 143), (90, 129), (90, 131), (90, 139), (90, 141), (90, 142), (91, 128), (91, 131), (92, 130), (93, 127), (93, 130), (94, 127), (94, 130), (95, 127), (95, 129), (96, 127), (96, 128), ) coordinates_FF3E96 = ((109, 102), (109, 104), (109, 105), (109, 106), (109, 108), (110, 100), (110, 109), (111, 99), (111, 102), (111, 103), (111, 104), (111, 105), (111, 106), (111, 107), (111, 109), (112, 98), (112, 100), (112, 101), (112, 102), (112, 103), (112, 104), (112, 105), (112, 106), (112, 107), (112, 109), (113, 98), (113, 100), (113, 101), (113, 102), (113, 103), (113, 104), (113, 105), (113, 106), (113, 107), (113, 109), (114, 98), (114, 100), (114, 101), (114, 102), (114, 103), (114, 104), (114, 105), (114, 106), (114, 108), (114, 109), (114, 132), (114, 134), (115, 97), (115, 99), (115, 100), (115, 101), (115, 102), (115, 103), (115, 104), (115, 105), (115, 106), (115, 108), (115, 109), (115, 131), (115, 135), (116, 93), (116, 97), (116, 99), (116, 100), (116, 101), (116, 102), (116, 103), (116, 104), (116, 105), (116, 106), (116, 107), (116, 109), (116, 131), (116, 133), (116, 134), (116, 136), (117, 93), (117, 94), (117, 97), (117, 99), (117, 100), (117, 101), (117, 102), (117, 103), (117, 104), (117, 105), (117, 106), (117, 107), (117, 109), (117, 113), (117, 114), (117, 115), (117, 117), (117, 130), (117, 132), (117, 133), (117, 134), (117, 135), (117, 137), (117, 139), (118, 93), (118, 96), (118, 97), (118, 104), (118, 105), (118, 106), (118, 107), (118, 108), (118, 109), (118, 111), (118, 112), (118, 113), (118, 114), (118, 118), (118, 125), (118, 127), (118, 128), (118, 131), (118, 
132), (118, 133), (118, 135), (118, 136), (118, 139), (119, 93), (119, 98), (119, 99), (119, 100), (119, 101), (119, 102), (119, 103), (119, 112), (119, 116), (119, 118), (119, 124), (119, 134), (120, 93), (120, 95), (120, 96), (120, 104), (120, 105), (120, 106), (120, 107), (120, 108), (120, 110), (120, 117), (120, 118), (120, 124), (120, 126), (120, 127), (120, 128), (120, 129), (120, 130), (120, 131), (120, 132), (120, 133), (120, 136), (120, 137), (120, 138), (120, 140), (121, 110), ) coordinates_7EFFD4 = ((153, 123), (153, 125), (154, 124), (154, 126), (155, 125), (155, 127), (156, 126), (156, 128), (157, 126), (157, 129), (158, 126), (158, 129), (159, 127), (159, 130), (160, 127), (160, 130), (161, 127), (161, 130), (162, 127), (162, 130), (163, 127), (163, 130), (164, 127), (164, 128), (164, 130), (165, 128), (165, 130), (165, 134), (166, 128), (166, 130), (166, 133), (166, 135), (167, 128), (167, 130), (167, 131), (167, 132), (167, 134), (167, 136), (168, 128), (168, 129), (168, 130), (168, 133), (168, 135), (169, 129), (169, 131), (169, 134), (170, 129), (170, 133), (171, 128), (171, 131), (172, 128), (172, 130), ) coordinates_B12222 = ((149, 127), (150, 127), (151, 130), (152, 127), (152, 131), (153, 128), (153, 131), (154, 129), (154, 132), (155, 130), (155, 133), (156, 130), (156, 134), (157, 131), (157, 135), (158, 133), (158, 136), (159, 134), (159, 137), (160, 134), (160, 136), (160, 139), (160, 140), (161, 134), (161, 136), (161, 137), (161, 139), (162, 134), (162, 136), (162, 137), (162, 139), (163, 134), (163, 138), (164, 135), (164, 138), (165, 137), ) coordinates_7FFFD4 = ((71, 124), (71, 126), (72, 110), (72, 112), (72, 113), (72, 114), (72, 120), (72, 122), (72, 127), (72, 128), (73, 109), (73, 116), (73, 117), (73, 118), (73, 119), (73, 124), (73, 125), (73, 126), (73, 128), (74, 109), (74, 111), (74, 112), (74, 113), (74, 114), (74, 115), (74, 121), (74, 122), (74, 123), (74, 124), (74, 125), (74, 127), (75, 110), (75, 112), (75, 113), (75, 114), (75, 115), (75, 116), (75, 117), (75, 119), (75, 124), (75, 126), (76, 111), (76, 113), (76, 114), (76, 115), (76, 116), (76, 117), (76, 119), (76, 124), (76, 126), (77, 111), (77, 113), (77, 114), (77, 115), (77, 116), (77, 118), (77, 124), (77, 125), (78, 112), (78, 114), (78, 115), (78, 116), (78, 118), (78, 125), (79, 112), (79, 114), (79, 115), (79, 116), (79, 117), (79, 118), (79, 125), (80, 112), (80, 114), (80, 115), (80, 117), (81, 112), (81, 114), (81, 115), (81, 117), (82, 113), (82, 115), (82, 117), (83, 114), (83, 117), (84, 115), (84, 117), (85, 116), (86, 116), (87, 115), (87, 116), (88, 115), (88, 116), (89, 114), (89, 115), (90, 114), ) coordinates_B22222 = ((74, 129), (75, 132), (76, 128), (76, 130), (76, 133), (77, 128), (77, 130), (77, 131), (77, 132), (78, 127), (78, 131), (78, 132), (78, 133), (78, 135), (79, 127), (79, 129), (79, 130), (79, 131), (79, 132), (79, 135), (80, 131), (80, 134), (81, 131), (81, 133), (82, 131), (82, 132), (83, 130), (83, 131), (84, 129), (85, 128), (85, 130), (86, 128), (86, 129), (87, 127), (88, 128), (89, 126), (89, 127), (90, 125), (90, 126), (91, 126), (92, 124), (92, 125), (93, 123), (93, 125), (94, 123), (94, 125), (95, 125), (96, 124), ) coordinates_499B3C = ((144, 144), (144, 145), (145, 136), (145, 141), (145, 142), (145, 143), (145, 146), (146, 136), (146, 140), (146, 143), (146, 144), (146, 146), (147, 137), (147, 139), (147, 143), (147, 144), (147, 146), (148, 130), (148, 132), (148, 142), (148, 143), (148, 144), (148, 146), (149, 133), (149, 141), (149, 143), 
(149, 145), (150, 131), (150, 134), (150, 141), (150, 143), (150, 145), (151, 132), (151, 135), (151, 141), (151, 144), (152, 133), (152, 136), (152, 141), (152, 144), (153, 134), (153, 137), (153, 140), (153, 143), (154, 135), (154, 137), (154, 140), (154, 141), (154, 143), (155, 136), (155, 140), (155, 142), (156, 137), (156, 139), (156, 140), (156, 142), (157, 138), (157, 141), (158, 139), (158, 141), ) coordinates_633263 = ((154, 114), (155, 115), (155, 123), (156, 115), (156, 123), (157, 115), (157, 123), (157, 124), (158, 115), (158, 116), (158, 123), (158, 124), (159, 115), (159, 116), (159, 123), (159, 124), (160, 115), (160, 123), (161, 115), (161, 117), (161, 123), (161, 125), (162, 116), (162, 117), (162, 122), (162, 125), (163, 115), (163, 116), (163, 122), (163, 125), (164, 115), (164, 116), (164, 122), (164, 125), (165, 115), (165, 116), (165, 122), (165, 125), (166, 115), (166, 116), (166, 122), (166, 124), (166, 126), (167, 115), (167, 122), (167, 124), (167, 126), (168, 115), (168, 122), (168, 124), (168, 126), (169, 115), (169, 122), (169, 124), (169, 126), (170, 114), (170, 115), (170, 121), (170, 122), (170, 123), (170, 124), (170, 126), (171, 115), (171, 121), (171, 123), (171, 124), (171, 126), (172, 113), (172, 115), (172, 116), (172, 117), (172, 118), (172, 119), (172, 122), (172, 123), (173, 113), (173, 115), (173, 121), (173, 124), (174, 113), (174, 121), (174, 122), (174, 124), (175, 115), (175, 116), (175, 117), (175, 118), (175, 119), (175, 120), ) coordinates_4A9B3C = ((92, 140), (92, 142), (92, 143), (92, 145), (93, 140), (93, 145), (94, 141), (94, 143), (94, 145), (95, 142), (95, 145), (96, 142), (96, 145), (97, 139), (97, 145), (98, 138), (98, 140), (98, 141), (98, 142), (98, 143), (98, 145), (98, 146), ) coordinates_218B22 = ((150, 156), (150, 158), (151, 155), (151, 159), (152, 154), (152, 156), (152, 157), (152, 159), (153, 152), (153, 155), (153, 156), (153, 157), (153, 158), (153, 160), (154, 150), (154, 154), (154, 155), (154, 156), (154, 160), (155, 149), (155, 152), (155, 153), (155, 154), (155, 155), (155, 159), (156, 148), (156, 150), (156, 151), (156, 152), (156, 153), (156, 154), (156, 155), (156, 156), (157, 148), (157, 150), (157, 151), (157, 152), (157, 153), (157, 155), (158, 148), (158, 150), (158, 151), (158, 152), (158, 153), (158, 155), (159, 147), (159, 149), (159, 150), (159, 151), (159, 152), (159, 153), (159, 154), (159, 155), (159, 156), (160, 147), (160, 149), (160, 150), (160, 155), (160, 158), (161, 147), (161, 151), (161, 152), (161, 153), (161, 154), (161, 158), (162, 147), (162, 149), (162, 150), (162, 155), (162, 157), ) coordinates_228B22 = ((78, 147), (78, 148), (79, 147), (79, 149), (80, 147), (80, 150), (81, 147), (81, 148), (81, 149), (81, 151), (82, 148), (82, 151), (83, 148), (83, 150), (83, 152), (84, 148), (84, 150), (84, 152), (85, 149), (85, 151), (85, 153), (86, 149), (86, 151), (86, 153), (87, 149), (87, 151), (87, 153), (88, 150), (88, 152), (88, 154), (89, 150), (89, 152), (89, 154), (90, 151), (90, 154), (91, 151), (91, 154), (92, 152), (92, 155), (93, 153), (93, 156), (94, 154), (94, 158), (95, 155), (95, 158), )
470.988889
865
0.482177
eb7c3fb12e03f2b24dcc584553fc30f0b1f73b73
2,253
py
Python
examples/Redfish/expand_data.py
andreaslangnevyjel/python-ilorest-library
cd40e5ed9dfd615074d34ec6bb929dc8ea04a797
[ "Apache-2.0" ]
214
2016-04-04T12:24:52.000Z
2022-03-28T11:35:46.000Z
examples/Redfish/expand_data.py
andreaslangnevyjel/python-ilorest-library
cd40e5ed9dfd615074d34ec6bb929dc8ea04a797
[ "Apache-2.0" ]
139
2016-04-02T04:22:29.000Z
2022-03-25T06:54:45.000Z
examples/Redfish/expand_data.py
andreaslangnevyjel/python-ilorest-library
cd40e5ed9dfd615074d34ec6bb929dc8ea04a797
[ "Apache-2.0" ]
116
2016-04-04T20:39:42.000Z
2021-11-13T06:53:41.000Z
# Copyright 2020 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# -*- coding: utf-8 -*-
"""
An example of expanding data responses
"""

import sys
import json
from redfish import RedfishClient
from redfish.rest.v1 import ServerDownOrUnreachableError


def expand_data(_redfishobj, expand_url="/redfish/v1/"):
    response = _redfishobj.get(expand_url)
    exp_response = _redfishobj.get(expand_url + '?$expand=.')

    sys.stdout.write('Standard response:\n')
    sys.stdout.write('\t' + str(response.dict) + '\n')
    sys.stdout.write('Expanded response:\n')
    sys.stdout.write('\t' + str(exp_response.dict) + '\n')


if __name__ == "__main__":
    # When running on the server locally use the following commented values
    # SYSTEM_URL = None
    # LOGIN_ACCOUNT = None
    # LOGIN_PASSWORD = None

    # When running remotely connect using the secured (https://) address,
    # account name, and password to send https requests
    # SYSTEM_URL acceptable examples:
    # "https://10.0.0.100"
    # "https://ilo.hostname"
    SYSTEM_URL = "https://10.0.0.100"
    LOGIN_ACCOUNT = "admin"
    LOGIN_PASSWORD = "password"

    # URL to be expanded
    EXPAND_URL = "/redfish/v1/systems/"

    try:
        # Create a Redfish client object
        REDFISHOBJ = RedfishClient(base_url=SYSTEM_URL, username=LOGIN_ACCOUNT,
                                   password=LOGIN_PASSWORD)
        # Login with the Redfish client
        REDFISHOBJ.login()
    except ServerDownOrUnreachableError as excp:
        sys.stderr.write("ERROR: server not reachable or does not support Redfish.\n")
        sys.exit()

    expand_data(REDFISHOBJ, EXPAND_URL)
    REDFISHOBJ.logout()
35.203125
100
0.684421
6989358b5828b06e1b53569a06aa7612e515fb30
26,624
py
Python
tensorflow_probability/python/math/linalg_test.py
timudk/probability
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/math/linalg_test.py
timudk/probability
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
[ "Apache-2.0" ]
null
null
null
tensorflow_probability/python/math/linalg_test.py
timudk/probability
8bdbf1c0b0f801edaf342f4ffc9caf1cfd6f1103
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for linear algebra.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # Dependency imports from absl.testing import parameterized import hypothesis as hp from hypothesis import strategies as hps from hypothesis.extra import numpy as hpnp import numpy as np import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import test_util as tfp_test_util from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top class _PinvTest(object): def expected_pinv(self, a, rcond): """Calls `np.linalg.pinv` but corrects its broken batch semantics.""" if a.ndim < 3: return np.linalg.pinv(a, rcond) if rcond is None: rcond = 10. * max(a.shape[-2], a.shape[-1]) * np.finfo(a.dtype).eps s = np.concatenate([a.shape[:-2], [a.shape[-1], a.shape[-2]]]) a_pinv = np.zeros(s, dtype=a.dtype) for i in np.ndindex(a.shape[:(a.ndim - 2)]): a_pinv[i] = np.linalg.pinv( a[i], rcond=rcond if isinstance(rcond, float) else rcond[i]) return a_pinv def test_symmetric(self): a_ = self.dtype([[1., .4, .5], [.4, .2, .25], [.5, .25, .35]]) a_ = np.stack([a_ + 1., a_], axis=0) # Batch of matrices. a = tf1.placeholder_with_default( a_, shape=a_.shape if self.use_static_shape else None) if self.use_default_rcond: rcond = None else: rcond = self.dtype([0., 0.01]) # Smallest 1 component is forced to zero. expected_a_pinv_ = self.expected_pinv(a_, rcond) a_pinv = tfp.math.pinv(a, rcond, validate_args=True) a_pinv_ = self.evaluate(a_pinv) self.assertAllClose(expected_a_pinv_, a_pinv_, atol=1e-5, rtol=1e-5) if not self.use_static_shape: return self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape) def test_nonsquare(self): a_ = self.dtype([[1., .4, .5, 1.], [.4, .2, .25, 2.], [.5, .25, .35, 3.]]) a_ = np.stack([a_ + 0.5, a_], axis=0) # Batch of matrices. a = tf1.placeholder_with_default( a_, shape=a_.shape if self.use_static_shape else None) if self.use_default_rcond: rcond = None else: # Smallest 2 components are forced to zero. 
rcond = self.dtype([0., 0.25]) expected_a_pinv_ = self.expected_pinv(a_, rcond) a_pinv = tfp.math.pinv(a, rcond, validate_args=True) a_pinv_ = self.evaluate(a_pinv) self.assertAllClose(expected_a_pinv_, a_pinv_, atol=1e-5, rtol=1e-4) if not self.use_static_shape: return self.assertAllEqual(expected_a_pinv_.shape, a_pinv.shape) @test_util.run_all_in_graph_and_eager_modes class PinvTestDynamic32DefaultRcond(tf.test.TestCase, _PinvTest): dtype = np.float32 use_static_shape = False use_default_rcond = True @test_util.run_all_in_graph_and_eager_modes class PinvTestStatic64DefaultRcond(tf.test.TestCase, _PinvTest): dtype = np.float64 use_static_shape = True use_default_rcond = True @test_util.run_all_in_graph_and_eager_modes class PinvTestDynamic32CustomtRcond(tf.test.TestCase, _PinvTest): dtype = np.float32 use_static_shape = False use_default_rcond = False @test_util.run_all_in_graph_and_eager_modes class PinvTestStatic64CustomRcond(tf.test.TestCase, _PinvTest): dtype = np.float64 use_static_shape = True use_default_rcond = False class _CholeskyExtend(tf.test.TestCase): def testCholeskyExtension(self): xs = np.random.random(7).astype(self.dtype)[:, tf.newaxis] xs = tf1.placeholder_with_default( xs, shape=xs.shape if self.use_static_shape else None) k = tfp.positive_semidefinite_kernels.MaternOneHalf() mat = k.matrix(xs, xs) chol = tf.linalg.cholesky(mat) ys = np.random.random(3).astype(self.dtype)[:, tf.newaxis] ys = tf1.placeholder_with_default( ys, shape=ys.shape if self.use_static_shape else None) xsys = tf.concat([xs, ys], 0) new_chol_expected = tf.linalg.cholesky(k.matrix(xsys, xsys)) new_chol = tfp.math.cholesky_concat(chol, k.matrix(xsys, ys)) self.assertAllClose(new_chol_expected, new_chol) @hp.given(hps.data()) @hp.settings(deadline=None, max_examples=10, derandomize=tfp_test_util.derandomize_hypothesis()) def testCholeskyExtensionRandomized(self, data): jitter = lambda n: tf.linalg.eye(n, dtype=self.dtype) * 1e-5 target_bs = data.draw(hpnp.array_shapes()) prev_bs, new_bs = data.draw(tfp_test_util.broadcasting_shapes(target_bs, 2)) ones = tf.TensorShape([1] * len(target_bs)) smallest_shared_shp = tuple(np.min( [tf.broadcast_static_shape(ones, shp).as_list() for shp in [prev_bs, new_bs]], axis=0)) z = data.draw(hps.integers(min_value=1, max_value=12)) n = data.draw(hps.integers(min_value=0, max_value=z - 1)) m = z - n np.random.seed(data.draw(hps.integers(min_value=0, max_value=2**32 - 1))) xs = np.random.uniform(size=smallest_shared_shp + (n,)) data.draw(hps.just(xs)) xs = (xs + np.zeros(prev_bs.as_list() + [n]))[..., np.newaxis] xs = xs.astype(self.dtype) xs = tf1.placeholder_with_default( xs, shape=xs.shape if self.use_static_shape else None) k = tfp.positive_semidefinite_kernels.MaternOneHalf() mat = k.matrix(xs, xs) + jitter(n) chol = tf.linalg.cholesky(mat) ys = np.random.uniform(size=smallest_shared_shp + (m,)) data.draw(hps.just(ys)) ys = (ys + np.zeros(new_bs.as_list() + [m]))[..., np.newaxis] ys = ys.astype(self.dtype) ys = tf1.placeholder_with_default( ys, shape=ys.shape if self.use_static_shape else None) xsys = tf.concat([xs + tf.zeros(target_bs + (n, 1), dtype=self.dtype), ys + tf.zeros(target_bs + (m, 1), dtype=self.dtype)], axis=-2) new_chol_expected = tf.linalg.cholesky(k.matrix(xsys, xsys) + jitter(z)) new_chol = tfp.math.cholesky_concat( chol, k.matrix(xsys, ys) + jitter(z)[:, n:]) self.assertAllClose(new_chol_expected, new_chol, rtol=1e-5, atol=1e-5) @test_util.run_all_in_graph_and_eager_modes class CholeskyExtend32Static(_CholeskyExtend): dtype = np.float32 
use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class CholeskyExtend64Dynamic(_CholeskyExtend): dtype = np.float64 use_static_shape = False del _CholeskyExtend class _PivotedCholesky(tf.test.TestCase, parameterized.TestCase): def _random_batch_psd(self, dim): matrix = np.random.random([2, dim, dim]) matrix = np.matmul(matrix, np.swapaxes(matrix, -2, -1)) matrix = (matrix + np.diag(np.arange(dim) * .1)).astype(self.dtype) masked_shape = ( matrix.shape if self.use_static_shape else [None] * len(matrix.shape)) matrix = tf1.placeholder_with_default(matrix, shape=masked_shape) return matrix def testPivotedCholesky(self): dim = 11 matrix = self._random_batch_psd(dim) true_diag = tf.linalg.diag_part(matrix) pchol = tfp.math.pivoted_cholesky(matrix, max_rank=1) mat = tf.matmul(pchol, pchol, transpose_b=True) diag_diff_prev = self.evaluate(tf.abs(tf.linalg.diag_part(mat) - true_diag)) diff_norm_prev = self.evaluate( tf.linalg.norm(tensor=mat - matrix, ord='fro', axis=[-1, -2])) for rank in range(2, dim + 1): # Specifying diag_rtol forces the full max_rank decomposition. pchol = tfp.math.pivoted_cholesky(matrix, max_rank=rank, diag_rtol=-1) zeros_per_col = dim - tf.math.count_nonzero(pchol, axis=-2) mat = tf.matmul(pchol, pchol, transpose_b=True) pchol_shp, diag_diff, diff_norm, zeros_per_col = self.evaluate([ tf.shape(pchol), tf.abs(tf.linalg.diag_part(mat) - true_diag), tf.linalg.norm(tensor=mat - matrix, ord='fro', axis=[-1, -2]), zeros_per_col ]) self.assertAllEqual([2, dim, rank], pchol_shp) self.assertAllEqual( np.ones([2, rank], dtype=np.bool), zeros_per_col >= np.arange(rank)) self.assertAllLessEqual(diag_diff - diag_diff_prev, np.finfo(self.dtype).resolution) self.assertAllLessEqual(diff_norm - diff_norm_prev, np.finfo(self.dtype).resolution) diag_diff_prev, diff_norm_prev = diag_diff, diff_norm def testGradient(self): dim = 11 matrix = self._random_batch_psd(dim) _, dmatrix = tfp.math.value_and_gradient( lambda matrix: tfp.math.pivoted_cholesky(matrix, max_rank=dim // 3), matrix) self.assertIsNotNone(dmatrix) self.assertAllGreater( tf.linalg.norm(tensor=dmatrix, ord='fro', axis=[-1, -2]), 0.) @test_util.enable_control_flow_v2 def testGradientTapeCFv2(self): dim = 11 matrix = self._random_batch_psd(dim) with tf.GradientTape() as tape: tape.watch(matrix) pchol = tfp.math.pivoted_cholesky(matrix, max_rank=dim // 3) dmatrix = tape.gradient( pchol, matrix, output_gradients=tf.ones_like(pchol) * .01) self.assertIsNotNone(dmatrix) self.assertAllGreater( tf.linalg.norm(tensor=dmatrix, ord='fro', axis=[-1, -2]), 0.) # pyformat: disable @parameterized.parameters( # Inputs are randomly shuffled arange->tril; outputs from gpytorch. ( np.array([ [7., 0, 0, 0, 0, 0], [9, 13, 0, 0, 0, 0], [4, 10, 6, 0, 0, 0], [18, 1, 2, 14, 0, 0], [5, 11, 20, 3, 17, 0], [19, 12, 16, 15, 8, 21] ]), np.array([ [3.4444, -1.3545, 4.084, 1.7674, -1.1789, 3.7562], [8.4685, 1.2821, 3.1179, 12.9197, 0.0000, 0.0000], [7.5621, 4.8603, 0.0634, 7.3942, 4.0637, 0.0000], [15.435, -4.8864, 16.2137, 0.0000, 0.0000, 0.0000], [18.8535, 22.103, 0.0000, 0.0000, 0.0000, 0.0000], [38.6135, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000] ])), ( np.array([ [1, 0, 0], [2, 3, 0], [4, 5, 6.] ]), np.array([ [0.4558, 0.3252, 0.8285], [2.6211, 2.4759, 0.0000], [8.7750, 0.0000, 0.0000] ])), ( np.array([ [6, 0, 0], [3, 2, 0], [4, 1, 5.] 
]), np.array([ [3.7033, 4.7208, 0.0000], [2.1602, 2.1183, 1.9612], [6.4807, 0.0000, 0.0000] ]))) # pyformat: enable def testOracleExamples(self, mat, oracle_pchol): mat = np.matmul(mat, mat.T) for rank in range(1, mat.shape[-1] + 1): self.assertAllClose( oracle_pchol[..., :rank], tfp.math.pivoted_cholesky(mat, max_rank=rank, diag_rtol=-1), atol=1e-4) @test_util.run_all_in_graph_and_eager_modes class PivotedCholesky32Static(_PivotedCholesky): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class PivotedCholesky64Dynamic(_PivotedCholesky): dtype = np.float64 use_static_shape = False del _PivotedCholesky def make_tensor_hiding_attributes(value, hide_shape, hide_value=True): if not hide_value: return tf.convert_to_tensor(value=value) shape = None if hide_shape else getattr(value, 'shape', None) return tf1.placeholder_with_default(value, shape=shape) class _LUReconstruct(object): dtype = np.float32 use_static_shape = True def test_non_batch(self): x_ = np.array( [[3, 4], [1, 2]], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True) y_ = self.evaluate(y) if self.use_static_shape: self.assertAllEqual(x_.shape, y.shape) self.assertAllClose(x_, y_, atol=0., rtol=1e-3) def test_batch(self): x_ = np.array( [ [[3, 4], [1, 2]], [[7, 8], [3, 4]], ], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) y = tfp.math.lu_reconstruct(*tf.linalg.lu(x), validate_args=True) y_ = self.evaluate(y) if self.use_static_shape: self.assertAllEqual(x_.shape, y.shape) self.assertAllClose(x_, y_, atol=0., rtol=1e-3) @test_util.run_all_in_graph_and_eager_modes class LUReconstructStatic(tf.test.TestCase, _LUReconstruct): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class LUReconstructDynamic(tf.test.TestCase, _LUReconstruct): use_static_shape = False class _LUMatrixInverse(object): dtype = np.float32 use_static_shape = True def test_non_batch(self): x_ = np.array([[1, 2], [3, 4]], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True) y_ = self.evaluate(y) if self.use_static_shape: self.assertAllEqual(x_.shape, y.shape) self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3) def test_batch(self): x_ = np.array( [ [[1, 2], [3, 4]], [[7, 8], [3, 4]], [[0.25, 0.5], [0.75, -2.]], ], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) y = tfp.math.lu_matrix_inverse(*tf.linalg.lu(x), validate_args=True) y_ = self.evaluate(y) if self.use_static_shape: self.assertAllEqual(x_.shape, y.shape) self.assertAllClose(np.linalg.inv(x_), y_, atol=0., rtol=1e-3) @test_util.run_all_in_graph_and_eager_modes class LUMatrixInverseStatic(tf.test.TestCase, _LUMatrixInverse): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class LUMatrixInverseDynamic(tf.test.TestCase, _LUMatrixInverse): use_static_shape = False class _LUSolve(object): dtype = np.float32 use_static_shape = True def test_non_batch(self): x_ = np.array( [[1, 2], [3, 4]], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) rhs_ = np.array([[1, 1]], dtype=self.dtype).T rhs = tf1.placeholder_with_default( rhs_, shape=rhs_.shape if self.use_static_shape else None) lower_upper, perm = tf.linalg.lu(x) y 
= tfp.math.lu_solve(lower_upper, perm, rhs, validate_args=True) y_, perm_ = self.evaluate([y, perm]) self.assertAllEqual([1, 0], perm_) expected_ = np.linalg.solve(x_, rhs_) if self.use_static_shape: self.assertAllEqual(expected_.shape, y.shape) self.assertAllClose(expected_, y_, atol=0., rtol=1e-3) def test_batch_broadcast(self): x_ = np.array( [ [[1, 2], [3, 4]], [[7, 8], [3, 4]], [[0.25, 0.5], [0.75, -2.]], ], dtype=self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) rhs_ = np.array([[1, 1]], dtype=self.dtype).T rhs = tf1.placeholder_with_default( rhs_, shape=rhs_.shape if self.use_static_shape else None) lower_upper, perm = tf.linalg.lu(x) y = tfp.math.lu_solve(lower_upper, perm, rhs, validate_args=True) y_, perm_ = self.evaluate([y, perm]) self.assertAllEqual([[1, 0], [0, 1], [1, 0]], perm_) expected_ = np.linalg.solve(x_, rhs_[np.newaxis]) if self.use_static_shape: self.assertAllEqual(expected_.shape, y.shape) self.assertAllClose(expected_, y_, atol=0., rtol=1e-3) @test_util.run_all_in_graph_and_eager_modes class LUSolveStatic(tf.test.TestCase, _LUSolve): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class LUSolveDynamic(tf.test.TestCase, _LUSolve): use_static_shape = False class _SparseOrDenseMatmul(object): dtype = np.float32 use_static_shape = True use_sparse_tensor = False def _make_placeholder(self, x): return tf1.placeholder_with_default( x, shape=(x.shape if self.use_static_shape else None)) def _make_sparse_placeholder(self, x): indices_placeholder = self._make_placeholder(x.indices) values_placeholder = self._make_placeholder(x.values) if self.use_static_shape: dense_shape_placeholder = x.dense_shape else: dense_shape_placeholder = self._make_placeholder(x.dense_shape) return tf.SparseTensor( indices=indices_placeholder, values=values_placeholder, dense_shape=dense_shape_placeholder) def verify_sparse_dense_matmul(self, x_, y_): if self.use_sparse_tensor: x = self._make_sparse_placeholder(tfp.math.dense_to_sparse(x_)) else: x = self._make_placeholder(x_) y = self._make_placeholder(y_) z = tfp.math.sparse_or_dense_matmul(x, y) z_ = self.evaluate(z) if self.use_static_shape: batch_shape = x_.shape[:-2] self.assertAllEqual(z_.shape, batch_shape + (x_.shape[-2], y_.shape[-1])) self.assertAllClose(z_, np.matmul(x_, y_), atol=0., rtol=1e-3) def verify_sparse_dense_matvecmul(self, x_, y_): if self.use_sparse_tensor: x = self._make_sparse_placeholder(tfp.math.dense_to_sparse(x_)) else: x = self._make_placeholder(x_) y = self._make_placeholder(y_) z = tfp.math.sparse_or_dense_matvecmul(x, y) z_ = self.evaluate(z) if self.use_static_shape: batch_shape = x_.shape[:-2] self.assertAllEqual(z_.shape, batch_shape + (x_.shape[-2],)) self.assertAllClose( z_[..., np.newaxis], np.matmul(x_, y_[..., np.newaxis]), atol=0., rtol=1e-3) def test_non_batch_matmul(self): x_ = np.array([[3, 4, 0], [1, 0, 3]], dtype=self.dtype) y_ = np.array([[1, 0], [9, 0], [3, 1]], dtype=self.dtype) self.verify_sparse_dense_matmul(x_, y_) def test_non_batch_matvecmul(self): x_ = np.array([[3, 0, 5], [0, 2, 3]], dtype=self.dtype) y_ = np.array([1, 0, 9], dtype=self.dtype) self.verify_sparse_dense_matvecmul(x_, y_) def test_batch_matmul(self): x_ = np.array([ [[3, 4, 0], [1, 0, 3]], [[6, 0, 0], [0, 0, 0]], ], dtype=self.dtype) y_ = np.array([ [[1, 0], [9, 0], [3, 1]], [[2, 2], [5, 6], [0, 1]], ], dtype=self.dtype) self.verify_sparse_dense_matmul(x_, y_) def test_batch_matvecmul(self): x_ = np.array([ [[3, 0, 5], [0, 2, 3]], [[1, 1, 0], [6, 0, 0]], 
], dtype=self.dtype) y_ = np.array([ [1, 0, 9], [0, 0, 2], ], dtype=self.dtype) self.verify_sparse_dense_matvecmul(x_, y_) @test_util.run_all_in_graph_and_eager_modes class SparseOrDenseMatmulStatic(tf.test.TestCase, _SparseOrDenseMatmul): use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class SparseOrDenseMatmulDynamic(tf.test.TestCase, _SparseOrDenseMatmul): use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class SparseOrDenseMatmulStaticSparse(tf.test.TestCase, _SparseOrDenseMatmul): use_static_shape = True use_sparse_tensor = True @test_util.run_all_in_graph_and_eager_modes class SparseOrDenseMatmulDynamicSparse(tf.test.TestCase, _SparseOrDenseMatmul): use_static_shape = False use_sparse_tensor = True class _MatrixRankTest(object): def test_batch_default_tolerance(self): x_ = np.array([[[2, 3, -2], # = row2+row3 [-1, 1, -2], [3, 2, 0]], [[0, 2, 0], # = 2*row2 [0, 1, 0], [0, 3, 0]], # = 3*row2 [[1, 0, 0], [0, 1, 0], [0, 0, 1]]], self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) self.assertAllEqual([2, 1, 3], self.evaluate(tfp.math.matrix_rank(x))) def test_custom_tolerance_broadcasts(self): q = tf.linalg.qr(tf.random.uniform([3, 3], dtype=self.dtype))[0] e = tf.constant([0.1, 0.2, 0.3], dtype=self.dtype) a = tf.linalg.solve(q, tf.transpose(a=e * q), adjoint=True) self.assertAllEqual([3, 2, 1, 0], self.evaluate(tfp.math.matrix_rank( a, tol=[[0.09], [0.19], [0.29], [0.31]]))) def test_nonsquare(self): x_ = np.array([[[2, 3, -2, 2], # = row2+row3 [-1, 1, -2, 4], [3, 2, 0, -2]], [[0, 2, 0, 6], # = 2*row2 [0, 1, 0, 3], [0, 3, 0, 9]]], # = 3*row2 self.dtype) x = tf1.placeholder_with_default( x_, shape=x_.shape if self.use_static_shape else None) self.assertAllEqual([2, 1], self.evaluate(tfp.math.matrix_rank(x))) @test_util.run_all_in_graph_and_eager_modes class MatrixRankStatic32Test(tf.test.TestCase, _MatrixRankTest): dtype = np.float32 use_static_shape = True @test_util.run_all_in_graph_and_eager_modes class MatrixRankDynamic64Test(tf.test.TestCase, _MatrixRankTest): dtype = np.float64 use_static_shape = False @test_util.run_all_in_graph_and_eager_modes class FillTriangularTest(tf.test.TestCase): def _fill_triangular(self, x, upper=False): """Numpy implementation of `fill_triangular`.""" x = np.asarray(x) # Formula derived by solving for n: m = n(n+1)/2. m = np.int32(x.shape[-1]) n = np.sqrt(0.25 + 2. * m) - 0.5 if n != np.floor(n): raise ValueError('Invalid shape.') n = np.int32(n) # We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle # `m == n == 1`. Hence, we do absolute indexing. x_tail = x[..., (m - (n * n - m)):] y = np.concatenate( [x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]], axis=-1) y = y.reshape(np.concatenate([ np.int32(x.shape[:-1]), np.int32([n, n]), ], axis=0)) return np.triu(y) if upper else np.tril(y) def _run_test(self, x_, use_deferred_shape=False, **kwargs): x_ = np.asarray(x_) static_shape = None if use_deferred_shape else x_.shape x_pl = tf1.placeholder_with_default(x_, shape=static_shape) # Add `zeros_like(x)` such that x's value and gradient are identical. We # do this so we can ensure each gradient value is mapped to the right # gradient location. (Not doing this means the gradient wrt `x` is simple # `ones_like(x)`.) # Note: # zeros_like_x_pl == zeros_like(x_pl) # gradient(zeros_like_x_pl, x_pl) == x_pl - 1 def _zeros_like(x): return x * tf.stop_gradient(x - 1.) 
- tf.stop_gradient(x * (x - 1.)) actual, grad_actual = tfp.math.value_and_gradient( lambda x: tfp.math.fill_triangular( # pylint: disable=g-long-lambda x + _zeros_like(x), **kwargs), x_pl) actual_, grad_actual_ = self.evaluate([actual, grad_actual]) expected = self._fill_triangular(x_, **kwargs) if use_deferred_shape and not tf.executing_eagerly(): self.assertEqual(None, actual.shape) else: self.assertAllEqual(expected.shape, actual.shape) self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9) self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9) def testCorrectlyMakes1x1TriLower(self): self._run_test(np.random.randn(3, int(1*2/2))) def testCorrectlyMakesNoBatchTriLower(self): self._run_test(np.random.randn(int(4*5/2))) def testCorrectlyMakesBatchTriLower(self): self._run_test(np.random.randn(2, 3, int(3*4/2))) def testCorrectlyMakesBatchTriLowerUnknownShape(self): self._run_test(np.random.randn(2, 3, int(3*4/2)), use_deferred_shape=True) def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self): self._run_test(np.random.randn(2, 3, int(7*8/2)), use_deferred_shape=True) def testCorrectlyMakesBatch7x7TriLower(self): self._run_test(np.random.randn(2, 3, int(7*8/2))) def testCorrectlyMakes1x1TriUpper(self): self._run_test(np.random.randn(3, int(1*2/2)), upper=True) def testCorrectlyMakesNoBatchTriUpper(self): self._run_test(np.random.randn(int(4*5/2)), upper=True) def testCorrectlyMakesBatchTriUpper(self): self._run_test(np.random.randn(2, 2, int(3*4/2)), upper=True) def testCorrectlyMakesBatchTriUpperUnknownShape(self): self._run_test(np.random.randn(2, 2, int(3*4/2)), use_deferred_shape=True, upper=True) def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self): self._run_test(np.random.randn(2, 3, int(7*8/2)), use_deferred_shape=True, upper=True) def testCorrectlyMakesBatch7x7TriUpper(self): self._run_test(np.random.randn(2, 3, int(7*8/2)), upper=True) @test_util.run_all_in_graph_and_eager_modes class FillTriangularInverseTest(FillTriangularTest): def _run_test(self, x_, use_deferred_shape=False, **kwargs): x_ = np.asarray(x_) static_shape = None if use_deferred_shape else x_.shape x_pl = tf1.placeholder_with_default(x_, shape=static_shape) zeros_like_x_pl = (x_pl * tf.stop_gradient(x_pl - 1.) - tf.stop_gradient(x_pl * (x_pl - 1.))) x = x_pl + zeros_like_x_pl actual = tfp.math.fill_triangular(x, **kwargs) inverse_actual = tfp.math.fill_triangular_inverse(actual, **kwargs) inverse_actual_ = self.evaluate(inverse_actual) if use_deferred_shape and not tf.executing_eagerly(): self.assertEqual(None, inverse_actual.shape) else: self.assertAllEqual(x_.shape, inverse_actual.shape) self.assertAllEqual(x_, inverse_actual_) if __name__ == '__main__': tf.test.main()
33.872774
115
0.644231
bf628907100ed45f03c6ba2481f962223005d12b
1,945
py
Python
tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py
drov0/hive
747380ac6d1d621a99c94ccf3fd24bbece754a57
[ "MIT" ]
283
2020-03-20T02:13:12.000Z
2022-03-31T22:40:07.000Z
tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py
drov0/hive
747380ac6d1d621a99c94ccf3fd24bbece754a57
[ "MIT" ]
19
2020-03-20T03:09:16.000Z
2021-08-28T22:35:09.000Z
tests/functional/python_tests/cli_wallet/tests/009_get_open_orders.py
drov0/hive
747380ac6d1d621a99c94ccf3fd24bbece754a57
[ "MIT" ]
94
2020-03-20T01:53:05.000Z
2022-03-04T11:08:23.000Z
#!/usr/bin/python3

import time

from utils.test_utils import *
from utils.cmd_args import args
from utils.cli_wallet import CliWallet
from utils.logger import log, init_logger

if __name__ == "__main__":
    with Test(__file__):
        with CliWallet(args) as wallet:
            creator, user = make_user_for_tests(wallet)

            result_before = wallet.get_open_orders(user)['result']
            assert len(result_before) == 0

            log.info("testing sell order: 10.000 TESTS for 1000.000 TBD created by user {}".format(user))
            wallet.create_order(user, "1", "10.000 TESTS", "1000.000 TBD", "false", "9999", "true")
            result_sell = wallet.get_open_orders(user)['result']
            assert len(result_sell) == 1
            assert result_sell[0]['orderid'] == 1
            assert result_sell[0]['seller'] == user
            assert result_sell[0]['for_sale'] == 10000
            assert result_sell[0]['real_price'] == '100.00000000000000000'
            assert result_sell[0]['sell_price']['base'] == '10.000 TESTS'
            assert result_sell[0]['sell_price']['quote'] == '1000.000 TBD'
            assert not result_sell[0]['rewarded']

            log.info("testing buy order: 10.000 TBD for 1000.000 TESTS created by user {}".format(user))
            wallet.create_order(user, "2", "10.000 TBD", "1000.000 TESTS", "false", "9999", "true")
            result_buy = wallet.get_open_orders(user)['result']
            assert len(result_buy) == 2
            assert result_buy[1]['orderid'] == 2
            assert result_buy[1]['seller'] == user
            assert result_buy[1]['for_sale'] == 10000
            assert result_buy[1]['real_price'] == '0.01000000000000000'
            assert result_buy[1]['sell_price']['base'] == '10.000 TBD'
            assert result_buy[1]['sell_price']['quote'] == '1000.000 TESTS'
            assert not result_buy[1]['rewarded']
47.439024
108
0.597943
29e2d85ba89fbd087f080618a4c9b26454eeac13
5,258
py
Python
flsim/utils/tests/test_training_time_estimator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
79
2021-12-09T18:05:09.000Z
2022-03-23T20:43:46.000Z
flsim/utils/tests/test_training_time_estimator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
11
2021-12-30T17:54:04.000Z
2022-03-23T17:23:00.000Z
flsim/utils/tests/test_training_time_estimator.py
JohnlNguyen/FLSim
a5ed7c0b84499cd9dbc5fe95f8bcb4ba8ab5a5cb
[ "BSD-3-Clause" ]
9
2021-12-09T19:55:22.000Z
2022-03-15T00:02:08.000Z
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch from flsim.common.pytest_helper import assertEqual, assertAlmostEqual from flsim.utils.timing.training_duration_distribution import ( PerUserUniformDurationDistribution, PerUserUniformDurationDistributionConfig, PerUserHalfNormalDurationDistribution, PerUserHalfNormalDurationDistributionConfig, DurationDistributionFromListConfig, DurationDistributionFromList, DurationInfo, ) from flsim.utils.timing.training_time_estimator import ( get_training_time, AsyncTrainingTimeEstimator, SyncTrainingTimeEstimator, ) from omegaconf import OmegaConf class TestTrainingTimeEstimator: def test_time_from_list(self) -> None: """ Test training time from list Assuming UPR = 2 Sync would be the sum of slowest user between rounds round 1 user_1: duration = 4 user_2: duration = 3 round 2 user_3: duration = 2 user_4: duration = 1 total = 4 + 2 = 6 Async would be the user_1: duration = 4, start_time = 1 user_2: duration = 3, start_time = 1 user_3: duration = 2, start_time = 2 user_4: duration = 1, start_time = 3 users training @ time 1: user 1, user 2 users training @ time 3: user 2, user 3 users training @ time 4: user 3, user 4 users training @ time 5: user 4 finishes training """ training_events = [ DurationInfo(duration=4), DurationInfo(duration=3), DurationInfo(duration=2), DurationInfo(duration=1), ] async_start_times = [1, 1, 2, 3] sync_training_dist = DurationDistributionFromList( **OmegaConf.structured( DurationDistributionFromListConfig(training_events=training_events) ) ) async_training_dist = DurationDistributionFromList( **OmegaConf.structured( DurationDistributionFromListConfig(training_events=training_events) ) ) num_users = len(training_events) epochs = 1 users_per_round = 2 sync_estimator = SyncTrainingTimeEstimator( total_users=len(training_events), users_per_round=users_per_round, epochs=epochs, training_dist=sync_training_dist, ) async_estimator = AsyncTrainingTimeEstimator( total_users=num_users, users_per_round=users_per_round, epochs=epochs, training_dist=async_training_dist, start_times=async_start_times, ) async_time = async_estimator.training_time() sync_time = sync_estimator.training_time() assertEqual(sync_time, 6) assertEqual(async_time, 5) def test_uniform_training_time(self) -> None: """ Test uniform training time Sync and Async should have the same training time if UPR = 1 and duration_min close to duration_mean """ torch.manual_seed(0) num_users = 1000 epochs = 1 users_per_round = 1 duration_mean = 1.00 duration_min = 0.99999 training_dist = PerUserUniformDurationDistribution( **OmegaConf.structured( PerUserUniformDurationDistributionConfig( training_duration_mean=duration_mean, training_duration_min=duration_min, ) ) ) sync_time, async_time = get_training_time( num_users=num_users, users_per_round=users_per_round, epochs=epochs, training_dist=training_dist, ) assertAlmostEqual(sync_time, async_time, delta=1e-3) def test_per_user_half_normal(self) -> None: """ Test half normal training time Sync and Async should have the following training time sync_training_time = async_training_time = num_users * duration_min if UPR = 1 and duraton_std is close to 0 """ torch.manual_seed(0) num_users = 1000 epochs = 1 users_per_round = 1 duration_std = 1e-6 duration_min = 1.0 training_dist = PerUserHalfNormalDurationDistribution( **OmegaConf.structured( 
PerUserHalfNormalDurationDistributionConfig( training_duration_sd=duration_std, training_duration_min=duration_min, ) ) ) sync_time, async_time = get_training_time( num_users=num_users, users_per_round=users_per_round, epochs=epochs, training_dist=training_dist, ) assertAlmostEqual(sync_time, async_time, delta=1e-3) assertAlmostEqual(sync_time, num_users * duration_min, delta=1e-3) assertAlmostEqual(async_time, num_users * duration_min, delta=1e-3)
32.257669
83
0.625333
ce94faadb39823e06566cf7c720f348a448bb628
1,470
py
Python
nicos_demo/vsans1/setups/pressure.py
ebadkamil/nicos
0355a970d627aae170c93292f08f95759c97f3b5
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
12
2019-11-06T15:40:36.000Z
2022-01-01T16:23:00.000Z
nicos_demo/vsans1/setups/pressure.py
ebadkamil/nicos
0355a970d627aae170c93292f08f95759c97f3b5
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
91
2020-08-18T09:20:26.000Z
2022-02-01T11:07:14.000Z
nicos_demo/vsans1/setups/pressure.py
ISISComputingGroup/nicos
94cb4d172815919481f8c6ee686f21ebb76f2068
[ "CC-BY-3.0", "Apache-2.0", "CC-BY-4.0" ]
6
2020-01-11T10:52:30.000Z
2022-02-25T12:35:23.000Z
description = 'Vacuum sensors of detector and collimation tube'

group = 'lowlevel'

devices = dict(
    det_tube = device('nicos.devices.generic.ManualMove',
        description = 'pressure detector tube: Tube',
        abslimits = (0, 1000),
        fmtstr = '%.4G',
        pollinterval = 15,
        maxage = 60,
        lowlevel = True,
        unit = 'mbar',
    ),
    det_nose = device('nicos.devices.generic.ManualMove',
        description = 'pressure detector tube: Nose',
        abslimits = (0, 1000),
        fmtstr = '%.4G',
        pollinterval = 15,
        maxage = 60,
        lowlevel = True,
        unit = 'mbar',
    ),
    coll_tube = device('nicos.devices.generic.ManualMove',
        description = 'pressure collimation tube: Tube',
        abslimits = (0, 1000),
        fmtstr = '%.4G',
        pollinterval = 15,
        maxage = 60,
        lowlevel = True,
        unit = 'mbar',
    ),
    coll_nose = device('nicos.devices.generic.ManualMove',
        description = 'pressure collimation tube: Nose',
        abslimits = (0, 1000),
        fmtstr = '%.4G',
        pollinterval = 15,
        maxage = 60,
        lowlevel = True,
        unit = 'mbar',
    ),
    coll_pump = device('nicos.devices.generic.ManualMove',
        description = 'pressure collimation tube: Pump',
        abslimits = (0, 1000),
        fmtstr = '%.4G',
        pollinterval = 15,
        maxage = 60,
        lowlevel = True,
        unit = 'mbar',
    ),
)
28.269231
63
0.542857
0433d8a6fe3adde21da874f20482a09af670d149
3,366
py
Python
neurolang/utils/testing/logic.py
hndgzkn/NeuroLang
a3178d47f80bc0941440d9bb09e06c2f217b9566
[ "BSD-3-Clause" ]
1
2021-01-07T02:00:22.000Z
2021-01-07T02:00:22.000Z
neurolang/utils/testing/logic.py
hndgzkn/NeuroLang
a3178d47f80bc0941440d9bb09e06c2f217b9566
[ "BSD-3-Clause" ]
207
2020-11-04T12:51:10.000Z
2022-03-30T13:42:26.000Z
neurolang/utils/testing/logic.py
hndgzkn/NeuroLang
a3178d47f80bc0941440d9bb09e06c2f217b9566
[ "BSD-3-Clause" ]
6
2020-11-04T13:59:35.000Z
2021-03-19T05:28:10.000Z
""" This module exposes utility functions for tests on logic expressions. It should not be used for any other purpose than testing. """ from ...expression_pattern_matching import add_match from ...expression_walker import ExpressionWalker from ...expressions import Definition, Expression from ...logic import NaryLogicOperator __all__ = [ "logic_exp_commutative_equal", ] class LogicCommutativeComparison(Definition): """ Comparison between two expressions that uses the commutativity property of some logic operators such as conjunctions and disjunctions. Parameters ---------- first : Expression First expression. second : Expression Second expression. """ def __init__(self, first, second): self.first = first self.second = second def __repr__(self): return "Compare\n\t{}\nwith\n\t{}".format( repr(self.first), repr(self.second) ) class LogicCommutativeComparator(ExpressionWalker): """ Compare logic expressions using the commutativity property of some logic operators such as conjunctions and disjunctions. """ @add_match( LogicCommutativeComparison(NaryLogicOperator, NaryLogicOperator) ) def nary_logic_operators(self, comp): """ Compare two n-ary logic operators by comparing their two sets of formulas. """ if not isinstance(comp.first, type(comp.second)) or not isinstance( comp.second, type(comp.first) ): return False return self._compare_set_of_formulas(comp.first, comp.second) @add_match(LogicCommutativeComparison(Expression, Expression)) def expressions(self, comp): args1 = comp.first.unapply() args2 = comp.second.unapply() if len(args1) != len(args2): return False for arg1, arg2 in zip(args1, args2): if not self._args_equal(arg1, arg2): return False return True def _args_equal(self, arg1, arg2): if isinstance(arg1, Expression) and isinstance(arg2, Expression): if not self.walk(LogicCommutativeComparison(arg1, arg2)): return False elif arg1 != arg2: return False return True def _compare_set_of_formulas(self, first, second): return all( any( self.walk(LogicCommutativeComparison(f1, f2)) for f2 in second.formulas ) for f1 in first.formulas ) def logic_exp_commutative_equal(exp1, exp2): """ Compare two expressions using the commutativity property of logic operators. The two expressions do not need to be purely equal if the order of the formulas of a commutative logic operator is not the same in the two expressions. Apart from commutative logic operators, the comparison between the two expressions remains the same as the equality comparison. Parameters ---------- exp1 : Expression First expression. exp2 : Expression Second expression. """ if not isinstance(exp1, Expression) or not isinstance(exp2, Expression): raise ValueError("Can only compare expressions") return LogicCommutativeComparator().walk( LogicCommutativeComparison(exp1, exp2) )
28.285714
78
0.655674
15e199b22e341cb7cab56a47709641d697da9e73
2,734
py
Python
tests/test_edn.py
ciena-blueplanet/pydatomic
6e49d5a4d9716392eaeb8647e1da21eb300d5380
[ "MIT" ]
56
2015-01-14T16:38:37.000Z
2022-02-24T10:54:53.000Z
tests/test_edn.py
ciena-blueplanet/pydatomic
6e49d5a4d9716392eaeb8647e1da21eb300d5380
[ "MIT" ]
null
null
null
tests/test_edn.py
ciena-blueplanet/pydatomic
6e49d5a4d9716392eaeb8647e1da21eb300d5380
[ "MIT" ]
10
2015-01-27T02:53:03.000Z
2021-12-06T11:30:24.000Z
# -*- coding: utf-8 -*- import unittest from datetime import datetime from uuid import UUID from pydatomic import edn class EdnParseTest(unittest.TestCase): def test_all_data(self): data = { '"helloworld"': "helloworld", "23": 23, "23.11": 23.11, "true": True, "false": False, "nil": None, ":hello": ":hello", r'"string\"ing"': 'string"ing', '"string\n"': 'string\n', '[:hello]':(":hello",), '-10.4':-10.4, '"你"': u'你', '\\€': u'€', "[1 2]": (1, 2), "#{true \"hello\" 12}": set([True, "hello", 12]), '#inst "2012-09-10T23:51:55.840-00:00"': datetime(2012, 9, 10, 23, 51, 55, 840000), "(\\a \\b \\c \\d)": ("a", "b", "c", "d"), "{:a 1 :b 2 :c 3 :d 4}": {":a":1, ":b":2, ":c":3,":d":4}, "[1 2 3,4]": (1,2,3,4), "{:a [1 2 3] :b #{23.1 43.1 33.1}}": {":a":(1, 2, 3), ":b":frozenset([23.1, 43.1, 33.1])}, "{:a 1 :b [32 32 43] :c 4}": {":a":1, ":b":(32,32,43), ":c":4}, "\\你": u"你", '#db/fn{:lang "clojure" :code "(map l)"}': {':lang':u'clojure', ':code':u'(map l)'}, "#_ {[#{}] #{[]}} [23[34][32][4]]": (23, (34,), (32,), (4,)), '(:graham/stratton true \n , "A string with \\n \\"s" true #uuid "f81d4fae7dec11d0a76500a0c91e6bf6")': ( u':graham/stratton', True, u'A string with \n "s', True, UUID('f81d4fae-7dec-11d0-a765-00a0c91e6bf6') ), '[\space \\\xE2\x82\xAC [true []] ;true\n[true #inst "2012-09-10T23:39:43.309-00:00" true ""]]': ( ' ', u'\u20ac', (True, ()), (True, datetime(2012, 9, 10, 23, 39, 43, 309000), True, '') ), ' {true false nil [true, ()] 6 {#{nil false} {nil \\newline} }}': { None: (True, ()), True: False, 6: {frozenset([False, None]): {None: '\n'}} }, '[#{6.22e-18, -3.1415, 1} true #graham #{"pie" "chips"} "work"]': ( frozenset([6.22e-18, -3.1415, 1]), True, u'work' ), '(\\a .5)': (u'a', 0.5), '(List #{[123 456 {}] {a 1 b 2 c ({}, [])}})': ( u'List', ((123, 456, {}), {u'a': 1, u'c': ({}, ()), u'b': 2}) ), } for k, v in data.items(): self.assertEqual(edn.loads(k), v) def test_malformed_data(self): '''Verify ValueError() exception raise on malformed data''' data = ["[1 2 3", "@EE", "[@nil tee]"] for d in data: self.assertRaises(ValueError, edn.loads, d) if __name__ == '__main__': unittest.main()
41.424242
117
0.41368
3ad157ffd25a76d559494e3b24db09b4d1ba2ef8
1,032
py
Python
ophelia/voicerooms/config_options.py
Bunnic/Ophelia
7a521ca8cef1e067b6e402db16911b554057ce0d
[ "MIT" ]
null
null
null
ophelia/voicerooms/config_options.py
Bunnic/Ophelia
7a521ca8cef1e067b6e402db16911b554057ce0d
[ "MIT" ]
null
null
null
ophelia/voicerooms/config_options.py
Bunnic/Ophelia
7a521ca8cef1e067b6e402db16911b554057ce0d
[ "MIT" ]
null
null
null
""" Voicerooms Configuration module. Contains the options required to set up a voiceroom generator. """ from typing import List from ophelia.output import ConfigItem, disp_str from ophelia.utils.discord_utils import ( extract_category_config, extract_text_config, extract_voice_config ) VOICEROOMS_GENERATOR_CONFIG: List[ConfigItem] = [] for category in ["voice_category", "text_category"]: VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem( category, disp_str(f"voicerooms_generator_{category}"), extract_category_config )) for voice_channel in ["generator_channel", "sample_voice_channel"]: VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem( voice_channel, disp_str(f"voicerooms_generator_{voice_channel}"), extract_voice_config )) for text_channel in ["sample_text_channel", "log_channel"]: VOICEROOMS_GENERATOR_CONFIG.append(ConfigItem( text_channel, disp_str(f"voicerooms_generator_{text_channel}"), extract_text_config ))
27.157895
67
0.745155
17d8273c73888cc04c224429611b598d929de315
1,262
py
Python
regression_test_utils/regression_test_utils.py
JivanAmara/test_utils
f077083ebdd8cbcd626ef98994c582cf585fde14
[ "BSD-3-Clause" ]
null
null
null
regression_test_utils/regression_test_utils.py
JivanAmara/test_utils
f077083ebdd8cbcd626ef98994c582cf585fde14
[ "BSD-3-Clause" ]
null
null
null
regression_test_utils/regression_test_utils.py
JivanAmara/test_utils
f077083ebdd8cbcd626ef98994c582cf585fde14
[ "BSD-3-Clause" ]
null
null
null
'''
Created on Jul 29, 2015

@author: jivan
'''
import jsonpickle, logging


# PythonDecorators/my_decorator.py
class log_test_case(object):
    """ @brief: Decorator to log input & output of a method as a jsonpickle'd tuple
            for easy test creation.
        Format of the tuple is (<method name>, <args (without self)>, <kwargs>, <result>)
    @author: Jivan
    @since: 2015-07-29
    @change: 2015-08-03 by Jivan: Added class_name to initialization & logged output.
    """
    def __init__(self, logger, class_name):
        self.logger = logger
        self.class_name = class_name

    def __call__(self, f):
        method_name = f.__name__
        logger = self.logger

        def wrapped_f(*args, **kwargs):
            result = f(*args, **kwargs)
            if logger.getEffectiveLevel() <= logging.DEBUG:
                args_wo_instance = args[1:]
                tc = repr(jsonpickle.encode(
                    (method_name, args_wo_instance, kwargs, result), keys=True
                ))
                logger.debug('Decorator TestCase for "{}.{}":\n\t{}'
                             .format(self.class_name, method_name, tc))
            return result

        return wrapped_f
35.055556
92
0.561014
095a8c4c739fb420c16da1e1ae8240d1d72e1c59
798
py
Python
Python/Assignments/week3.py
aquib-sh/DSA-C-PY
0cc9e874d5310762edd7b6c12dee07e351668c17
[ "CC0-1.0" ]
null
null
null
Python/Assignments/week3.py
aquib-sh/DSA-C-PY
0cc9e874d5310762edd7b6c12dee07e351668c17
[ "CC0-1.0" ]
null
null
null
Python/Assignments/week3.py
aquib-sh/DSA-C-PY
0cc9e874d5310762edd7b6c12dee07e351668c17
[ "CC0-1.0" ]
null
null
null
def remdup(li):
    """Remove duplicates from li, keeping only the last occurrence of each value."""
    length = len(li)
    holder = []
    if length <= 1:
        return li
    for i in range(0, length):
        if i == length - 1:
            holder.append(li[i])
        else:
            # Keep li[i] only if the same value does not appear again later in the list.
            if not li[i] in li[(i + 1):]:
                holder.append(li[i])
    return holder


def splitsum(l):
    """Return [sum of squares of non-negative values, sum of cubes of negative values]."""
    pos = 0
    neg = 0
    for i in range(0, len(l)):
        if l[i] < 0:
            neg += l[i] ** 3
        else:
            pos += l[i] ** 2
    return [pos, neg]


def matrixflip(m, d):
    """Flip matrix m horizontally ('h': reverse each row) or vertically ('v': reverse row order)."""
    nm = []
    if d == 'h':
        for elem in m:
            nm.append([elem[i] for i in range(len(elem) - 1, -1, -1)])
    if d == 'v':
        for i in range(len(m) - 1, -1, -1):
            nm.append(m[i])
    return nm
17.733333
68
0.384712
b0e650d33133e60c097f26b1e8671202dfc39782
4,133
py
Python
api/streamlit_experiments/s3.py
aws-samples/aws-open-data-analytics-notebooks
680e9689e1b0ceb047960662d220564ae3ecbddb
[ "Apache-2.0" ]
70
2019-05-09T20:02:13.000Z
2021-04-03T12:09:18.000Z
api/streamlit_experiments/s3.py
aws-samples/cloud-experiments
680e9689e1b0ceb047960662d220564ae3ecbddb
[ "Apache-2.0" ]
14
2021-05-15T21:14:28.000Z
2022-03-31T09:09:11.000Z
api/streamlit_experiments/s3.py
aws-samples/aws-open-data-analytics-notebooks
680e9689e1b0ceb047960662d220564ae3ecbddb
[ "Apache-2.0" ]
65
2019-05-20T00:48:04.000Z
2021-04-24T02:28:08.000Z
import streamlit as st import boto3 import botocore import pandas as pd import io s3_client = boto3.client('s3') s3_resource = boto3.resource('s3') def search_buckets(): search = st.text_input('Search S3 bucket in your account', '') response = s3_client.list_buckets() if search: buckets_found = 0 for bucket in response['Buckets']: if search: if search in bucket["Name"]: buckets_found = buckets_found + 1 st.write(f'{bucket["Name"]}') if buckets_found: st.success(f'Listing existing **{buckets_found}** buckets containing **{search}** string') else: st.warning(f'No matching buckets found containing **{search}** string') else: st.info('Provide string to search for listing buckets') def list_bucket_contents(): total_size_gb = 0 total_files = 0 match_size_gb = 0 match_files = 0 bucket = st.text_input('S3 bucket name (public bucket or private to your account)', '') bucket_resource = s3_resource.Bucket(bucket) match = st.text_input('(optional) Filter bucket contents with matching string', '') size_mb = st.text_input('(optional) Match files up to size in MB (0 for all sizes)', '0') if size_mb: size_mb = int(size_mb) else: size_mb = 0 if bucket: for key in bucket_resource.objects.all(): key_size_mb = key.size/1024/1024 total_size_gb += key_size_mb total_files += 1 list_check = False if not match: list_check = True elif match in key.key: list_check = True if list_check and not size_mb: match_files += 1 match_size_gb += key_size_mb st.write(f'{key.key} ({key_size_mb:3.0f}MB)') elif list_check and key_size_mb <= size_mb: match_files += 1 match_size_gb += key_size_mb st.write(f'{key.key} ({key_size_mb:3.0f}MB)') if match: st.info(f'Matched file size is **{match_size_gb/1024:3.1f}GB** with **{match_files}** files') st.success(f'Bucket **{bucket}** total size is **{total_size_gb/1024:3.1f}GB** with **{total_files}** files') else: st.info('Provide bucket name to list contents') def create_bucket(): bucket = st.text_input('S3 bucket name to create', '') if bucket: try: s3_client.create_bucket(Bucket=bucket) except botocore.exceptions.ClientError as e: st.error('Bucket **' + bucket + '** could not be created. ' + e.response['Error']['Message']) return st.success('The S3 bucket **' + bucket + '** successfully created or already exists in your account') else: st.info('Provide unique bucket name to create') def s3_select(): bucket = st.text_input('S3 bucket name', '') csv = st.text_input('CSV File path and name', '') st.write("Example: `SELECT * FROM s3object s LIMIT 5`") sql = st.text_area('SQL statement', '') if bucket and csv and sql: s3_select_results = s3_client.select_object_content( Bucket=bucket, Key=csv, Expression=sql, ExpressionType='SQL', InputSerialization={'CSV': {"FileHeaderInfo": "Use"}}, OutputSerialization={'JSON': {}}, ) for event in s3_select_results['Payload']: if 'Records' in event: df = pd.read_json(io.StringIO(event['Records']['Payload'].decode('utf-8')), lines=True) elif 'Stats' in event: st.write(f"Scanned: {int(event['Stats']['Details']['BytesScanned'])/1024/1024:5.2f}MB") st.write(f"Processed: {int(event['Stats']['Details']['BytesProcessed'])/1024/1024:5.2f}MB") st.write(f"Returned: {int(event['Stats']['Details']['BytesReturned'])/1024/1024:5.2f}MB") st.write(df) else: st.info('Provide S3 bucket, CSV file name, and SQL statement')
39.361905
117
0.583837
0e506a262abbfab83584566410dfe7ec665436a4
4,172
py
Python
tests/unit/bokeh/models/test_mappers.py
tcmetzger/bokeh
5daff21bfb7e10b69ff9aa2f35eb506777a38264
[ "BSD-3-Clause" ]
null
null
null
tests/unit/bokeh/models/test_mappers.py
tcmetzger/bokeh
5daff21bfb7e10b69ff9aa2f35eb506777a38264
[ "BSD-3-Clause" ]
null
null
null
tests/unit/bokeh/models/test_mappers.py
tcmetzger/bokeh
5daff21bfb7e10b69ff9aa2f35eb506777a38264
[ "BSD-3-Clause" ]
null
null
null
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- import pytest ; pytest #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Bokeh imports from _util_models import check_properties_existence from bokeh.palettes import Spectral6 # Module under test import bokeh.models.mappers as bmm # isort:skip #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- class Test_CategoricalColorMapper: def test_basic(self) -> None: mapper = bmm.CategoricalColorMapper() check_properties_existence(mapper, [ "factors", "palette", "start", "end", "nan_color"], ) def test_warning_with_short_palette(self, recwarn) -> None: bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green"]) assert len(recwarn) == 1 def test_no_warning_with_long_palette(self, recwarn) -> None: bmm.CategoricalColorMapper(factors=["a", "b", "c"], palette=["red", "green", "orange", "blue"]) assert len(recwarn) == 0 def test_with_pandas_index(self, pd) -> None: fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries'] years = ['2015', '2016', '2017'] data = {'2015' : [2, 1, 4, 3, 2, 4], '2016' : [5, 3, 3, 2, 4, 6], '2017' : [3, 2, 4, 4, 5, 3]} df = pd.DataFrame(data, index=fruits) fruits = df.index years = df.columns m = bmm.CategoricalColorMapper(palette=Spectral6, factors=years, start=1, end=2) assert list(m.factors) == list(years) assert isinstance(m.factors, pd.Index) class Test_CategoricalPatternMapper: def test_basic(self) -> None: mapper = bmm.CategoricalPatternMapper() check_properties_existence(mapper, [ "factors", "patterns", "start", "end", "default_value"], ) class Test_CategoricalMarkerMapper: def test_basic(self) -> None: mapper = bmm.CategoricalMarkerMapper() check_properties_existence(mapper, [ "factors", "markers", "start", "end", "default_value"], ) class Test_LinearColorMapper: def test_basic(self) -> None: mapper = bmm.LinearColorMapper() check_properties_existence(mapper, [ "palette", "low", "high", "low_color", "high_color", "nan_color"], ) class Test_LogColorMapper: def test_basic(self) -> None: mapper = bmm.LogColorMapper() check_properties_existence(mapper, [ "palette", "low", "high", "low_color", "high_color", "nan_color"], ) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
32.59375
103
0.394775
a4e32ef9c8adc091f8f4325ae63ce3419162c50b
3,647
py
Python
genedisco/evaluation/hitratio.py
genedisco/genedisco
26b7ce93b222fd80e914f2f2236969b356e7f701
[ "Apache-2.0" ]
11
2022-02-07T13:19:02.000Z
2022-03-25T03:38:15.000Z
genedisco/evaluation/hitratio.py
genedisco/genedisco
26b7ce93b222fd80e914f2f2236969b356e7f701
[ "Apache-2.0" ]
4
2022-02-05T19:12:30.000Z
2022-03-18T09:12:35.000Z
genedisco/evaluation/hitratio.py
genedisco/genedisco
26b7ce93b222fd80e914f2f2236969b356e7f701
[ "Apache-2.0" ]
6
2022-02-07T16:14:54.000Z
2022-03-18T22:26:31.000Z
""" Copyright (C) 2022 Arash Mehrjou, GlaxoSmithKline plc Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import os import pickle import numpy as np from typing import Optional, AnyStr from slingpy.evaluation.metrics.abstract_metric import AbstractMetric class HitRatio(AbstractMetric): """ A metric to measure the ratio of the top mover genes selected by the acquisition function. """ def get_abbreviation(self) -> AnyStr: return "HR" @staticmethod def evaluate(top_movers_filepath:AnyStr, super_dir_to_cycle_dirs: AnyStr) -> np.ndarray: with open(top_movers_filepath, "rb") as f: top_mover_indices = pickle.load(f) top_mover_set = set(top_mover_indices) num_top_hits = len(top_mover_indices) num_AL_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs) selected_indices_per_cycle = get_cumulative_selected_indices( super_dir_to_cycle_dirs) cumulative_top_hit_ratio = [] for c in range(num_AL_cycles): selected_indices = selected_indices_per_cycle[c] num_of_hits = num_top_hits - len(top_mover_set - set(selected_indices)) cumulative_top_hit_ratio.append(num_of_hits/num_top_hits) return cumulative_top_hit_ratio[-1] # returns the top hit ratio of the current cycle def get_cumulative_selected_indices(super_dir_to_cycle_dirs: AnyStr): """ Get a list of selected indiced at cycles of active learning. Args: super_dir_to_cycle_dirs: The dir in which the cycle dirs are saved. seed: The seed of the experiment. Return a concatenated list of the saved selected indices so far. """ num_AL_cycles = get_num_AL_cycles(super_dir_to_cycle_dirs) selected_indices_per_cycles = [] for c in range(num_AL_cycles): filename = os.path.join(super_dir_to_cycle_dirs, "cycle_" + str(c), "selected_indices.pickle") with open(filename, "rb") as f: selected_indices = pickle.load(f) # selected_indices = [x.decode("utf-8") for x in selected_indices] # Uncomment this line if the stored Gene names are byte strings. selected_indices_per_cycles.append(selected_indices) return selected_indices_per_cycles def get_num_AL_cycles(super_dir_to_cycle_dirs: AnyStr): """Get the number of cycles stored in the provided dir. """ all_subdirs = list(os.walk(super_dir_to_cycle_dirs))[0][1] cycle_subdirs = [folder_name for folder_name in all_subdirs if folder_name.startswith("cycle")] num_AL_cycles = len(cycle_subdirs) return num_AL_cycles
47.986842
143
0.732931
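The row above carries GeneDisco's hit-ratio metric. As a hedged illustration, it could be driven as in the sketch below; the file paths, directory layout, and pickled contents are assumptions made for this note, not values taken from the dataset row.

# Hypothetical usage sketch: results/top_movers.pickle holds a pickled list of
# top-mover gene identifiers, and results/experiment_1/cycle_0, cycle_1, ...
# each contain a selected_indices.pickle written by the active-learning loop.
from genedisco.evaluation.hitratio import HitRatio, get_num_AL_cycles

top_movers_file = "results/top_movers.pickle"
cycle_root = "results/experiment_1"

num_cycles = get_num_AL_cycles(cycle_root)
hit_ratio = HitRatio.evaluate(top_movers_file, cycle_root)
print(f"Hit ratio after {num_cycles} cycles: {hit_ratio:.3f}")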
12fe26a4af0f0a8758ed418b3d06127b37fa4ad8
920
py
Python
manager/projects/migrations/0016_auto_20201016_0326.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
30
2016-03-26T12:08:04.000Z
2021-12-24T14:48:32.000Z
manager/projects/migrations/0016_auto_20201016_0326.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
1,250
2016-03-23T04:56:50.000Z
2022-03-28T02:27:58.000Z
manager/projects/migrations/0016_auto_20201016_0326.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
11
2016-07-14T17:04:20.000Z
2021-07-01T16:19:09.000Z
# Generated by Django 3.1.2 on 2020-10-16 03:26 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('projects', '0015_auto_20201007_0337'), ] operations = [ migrations.RemoveField( model_name='googledrivesource', name='folder_id', ), migrations.AddField( model_name='googledrivesource', name='google_id', field=models.TextField(default='', help_text='The id of the file or folder.'), preserve_default=False, ), migrations.AddField( model_name='googledrivesource', name='kind', field=models.CharField(choices=[('file', 'File'), ('folder', 'Folder')], default='folder', help_text='The kind of Google Drive resource: file or folder.', max_length=16), preserve_default=False, ), ]
30.666667
182
0.594565
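The migration above drops folder_id before introducing google_id, so any existing folder ids are discarded at upgrade time. If preserving them had been required, the same operations could be reordered around a RunPython copy step. The variant below is a hypothetical sketch written for this note, not code from the repository, and assumes the old column still holds the values to carry over.

# Hypothetical reordering of the same migration: add the new columns first,
# copy the data across, then drop the old column.
from django.db import migrations, models


def copy_folder_id(apps, schema_editor):
    # Use the historical model so the step stays valid as models evolve.
    GoogleDriveSource = apps.get_model("projects", "GoogleDriveSource")
    for source in GoogleDriveSource.objects.all():
        source.google_id = source.folder_id
        source.kind = "folder"
        source.save(update_fields=["google_id", "kind"])


operations = [
    migrations.AddField(
        model_name="googledrivesource",
        name="google_id",
        field=models.TextField(default="", help_text="The id of the file or folder."),
        preserve_default=False,
    ),
    migrations.AddField(
        model_name="googledrivesource",
        name="kind",
        field=models.CharField(
            choices=[("file", "File"), ("folder", "Folder")],
            default="folder",
            help_text="The kind of Google Drive resource: file or folder.",
            max_length=16,
        ),
        preserve_default=False,
    ),
    migrations.RunPython(copy_folder_id, migrations.RunPython.noop),
    migrations.RemoveField(model_name="googledrivesource", name="folder_id"),
]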
a201ac4aa8fba548a2db478ed74b26f9d6a8d17b
12,945
py
Python
mvpnet/train_3d.py
shnhrtkyk/mvpnet
cadf636749b5ee6e73e96ff68e4b32728088decd
[ "MIT" ]
79
2020-01-12T20:30:34.000Z
2022-03-15T06:37:09.000Z
mvpnet/train_3d.py
jtpils/mvpnet
cadf636749b5ee6e73e96ff68e4b32728088decd
[ "MIT" ]
4
2020-02-14T17:26:56.000Z
2021-08-30T07:54:47.000Z
mvpnet/train_3d.py
jtpils/mvpnet
cadf636749b5ee6e73e96ff68e4b32728088decd
[ "MIT" ]
10
2020-01-13T05:59:15.000Z
2021-11-02T03:00:22.000Z
#!/usr/bin/env python import os import os.path as osp import sys import argparse import logging import time import socket import warnings import open3d # import before torch import torch from torch import nn from torch.utils.tensorboard import SummaryWriter # Assume that the script is run at the root directory _ROOT_DIR = os.path.abspath(osp.dirname(__file__) + '/..') sys.path.insert(0, _ROOT_DIR) from common.solver.build import build_optimizer, build_scheduler from common.nn.freezer import Freezer from common.utils.checkpoint import CheckpointerV2 from common.utils.logger import setup_logger from common.utils.metric_logger import MetricLogger from common.utils.torch_util import set_random_seed from common.utils.sampler import IterationBasedBatchSampler from mvpnet.models.build import build_model_sem_seg_3d from mvpnet.data.build import build_dataloader def parse_args(): parser = argparse.ArgumentParser(description='PyTorch 3D Deep Learning Training') parser.add_argument( '--cfg', dest='config_file', default='', metavar='FILE', help='path to config file', type=str, ) parser.add_argument( 'opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args() return args def train(cfg, output_dir='', run_name=''): # ---------------------------------------------------------------------------- # # Build models, optimizer, scheduler, checkpointer, etc. # It is recommended not to modify this section. # ---------------------------------------------------------------------------- # logger = logging.getLogger('mvpnet.train') # build model set_random_seed(cfg.RNG_SEED) model, loss_fn, train_metric, val_metric = build_model_sem_seg_3d(cfg) logger.info('Build model:\n{}'.format(str(model))) num_params = sum(param.numel() for param in model.parameters()) print('#Parameters: {:.2e}'.format(num_params)) num_gpus = torch.cuda.device_count() if num_gpus > 1: model = nn.DataParallel(model).cuda() elif num_gpus == 1: model = model.cuda() else: raise NotImplementedError('Not support cpu training now.') # build optimizer # model_cfg = cfg.MODEL[cfg.MODEL.TYPE] optimizer = build_optimizer(cfg, model) # build lr scheduler scheduler = build_scheduler(cfg, optimizer) # build checkpointer # Note that checkpointer will load state_dict of model, optimizer and scheduler. checkpointer = CheckpointerV2(model, optimizer=optimizer, scheduler=scheduler, save_dir=output_dir, logger=logger, max_to_keep=cfg.TRAIN.MAX_TO_KEEP) checkpoint_data = checkpointer.load(cfg.RESUME_PATH, resume=cfg.AUTO_RESUME, resume_states=cfg.RESUME_STATES) ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD # build freezer if cfg.TRAIN.FROZEN_PATTERNS: freezer = Freezer(model, cfg.TRAIN.FROZEN_PATTERNS) freezer.freeze(verbose=True) # sanity check else: freezer = None # build data loader # Reset the random seed again in case the initialization of models changes the random state. set_random_seed(cfg.RNG_SEED) train_dataloader = build_dataloader(cfg, mode='train') val_period = cfg.VAL.PERIOD val_dataloader = build_dataloader(cfg, mode='val') if val_period > 0 else None # build tensorboard logger (optionally by comment) if output_dir: tb_dir = osp.join(output_dir, 'tb.{:s}'.format(run_name)) summary_writier = SummaryWriter(tb_dir) else: summary_writier = None # ---------------------------------------------------------------------------- # # Train # Customization begins here. 
# ---------------------------------------------------------------------------- # max_iteration = cfg.SCHEDULER.MAX_ITERATION start_iteration = checkpoint_data.get('iteration', 0) best_metric_name = 'best_{}'.format(cfg.VAL.METRIC) best_metric = checkpoint_data.get(best_metric_name, None) logger.info('Start training from iteration {}'.format(start_iteration)) # add metrics if not isinstance(train_metric, (list, tuple)): train_metric = [train_metric] if not isinstance(val_metric, (list, tuple)): val_metric = [val_metric] train_metric_logger = MetricLogger(delimiter=' ') train_metric_logger.add_meters(train_metric) val_metric_logger = MetricLogger(delimiter=' ') val_metric_logger.add_meters(val_metric) # wrap the dataloader batch_sampler = train_dataloader.batch_sampler train_dataloader.batch_sampler = IterationBasedBatchSampler(batch_sampler, max_iteration, start_iteration) def setup_train(): # set training mode model.train() loss_fn.train() # freeze parameters/modules optionally if freezer is not None: freezer.freeze() # reset metric train_metric_logger.reset() def setup_validate(): # set evaluate mode model.eval() loss_fn.eval() # reset metric val_metric_logger.reset() setup_train() end = time.time() for iteration, data_batch in enumerate(train_dataloader, start_iteration): data_time = time.time() - end # copy data from cpu to gpu data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items()} # forward preds = model(data_batch) # update losses optimizer.zero_grad() loss_dict = loss_fn(preds, data_batch) total_loss = sum(loss_dict.values()) # It is slightly faster to update metrics and meters before backward with torch.no_grad(): train_metric_logger.update(loss=total_loss, **loss_dict) for metric in train_metric: metric.update_dict(preds, data_batch) # backward total_loss.backward() if cfg.OPTIMIZER.MAX_GRAD_NORM > 0: # CAUTION: built-in clip_grad_norm_ clips the total norm. 
nn.utils.clip_grad_norm_(model.parameters(), max_norm=cfg.OPTIMIZER.MAX_GRAD_NORM) optimizer.step() batch_time = time.time() - end train_metric_logger.update(time=batch_time, data=data_time) cur_iter = iteration + 1 # log if cur_iter == 1 or (cfg.TRAIN.LOG_PERIOD > 0 and cur_iter % cfg.TRAIN.LOG_PERIOD) == 0: logger.info( train_metric_logger.delimiter.join( [ 'iter: {iter:4d}', '{meters}', 'lr: {lr:.2e}', 'max mem: {memory:.0f}', ] ).format( iter=cur_iter, meters=str(train_metric_logger), lr=optimizer.param_groups[0]['lr'], memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2), ) ) # summary if summary_writier is not None and cfg.TRAIN.SUMMARY_PERIOD > 0 and cur_iter % cfg.TRAIN.SUMMARY_PERIOD == 0: keywords = ('loss', 'acc', 'iou') for name, meter in train_metric_logger.meters.items(): if all(k not in name for k in keywords): continue summary_writier.add_scalar('train/' + name, meter.global_avg, global_step=cur_iter) # checkpoint if (ckpt_period > 0 and cur_iter % ckpt_period == 0) or cur_iter == max_iteration: checkpoint_data['iteration'] = cur_iter checkpoint_data[best_metric_name] = best_metric checkpointer.save('model_{:06d}'.format(cur_iter), **checkpoint_data) # ---------------------------------------------------------------------------- # # validate for one epoch # ---------------------------------------------------------------------------- # if val_period > 0 and (cur_iter % val_period == 0 or cur_iter == max_iteration): start_time_val = time.time() setup_validate() end = time.time() with torch.no_grad(): for iteration_val, data_batch in enumerate(val_dataloader): data_time = time.time() - end # copy data from cpu to gpu data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items()} # forward preds = model(data_batch) # update losses and metrics loss_dict = loss_fn(preds, data_batch) total_loss = sum(loss_dict.values()) # update metrics and meters val_metric_logger.update(loss=total_loss, **loss_dict) for metric in val_metric: metric.update_dict(preds, data_batch) batch_time = time.time() - end val_metric_logger.update(time=batch_time, data=data_time) end = time.time() if cfg.VAL.LOG_PERIOD > 0 and iteration_val % cfg.VAL.LOG_PERIOD == 0: logger.info( val_metric_logger.delimiter.join( [ 'iter: {iter:4d}', '{meters}', 'max mem: {memory:.0f}', ] ).format( iter=iteration, meters=str(val_metric_logger), memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2), ) ) epoch_time_val = time.time() - start_time_val logger.info('Iteration[{}]-Val {} total_time: {:.2f}s'.format( cur_iter, val_metric_logger.summary_str, epoch_time_val)) # summary if summary_writier is not None: keywords = ('loss', 'acc', 'iou') for name, meter in val_metric_logger.meters.items(): if all(k not in name for k in keywords): continue summary_writier.add_scalar('val/' + name, meter.global_avg, global_step=cur_iter) # best validation if cfg.VAL.METRIC in val_metric_logger.meters: cur_metric = val_metric_logger.meters[cfg.VAL.METRIC].global_avg if best_metric is None \ or ('loss' not in cfg.VAL.METRIC and cur_metric > best_metric) \ or ('loss' in cfg.VAL.METRIC and cur_metric < best_metric): best_metric = cur_metric checkpoint_data['iteration'] = cur_iter checkpoint_data[best_metric_name] = best_metric checkpointer.save('model_best', tag=False, **checkpoint_data) # restore training setup_train() # since pytorch v1.1.0, lr_scheduler is called after optimization. 
if scheduler is not None: scheduler.step() end = time.time() logger.info('Best val-{} = {}'.format(cfg.VAL.METRIC, best_metric)) return model def main(): args = parse_args() # load the configuration # import on-the-fly to avoid overwriting cfg from common.config import purge_cfg from mvpnet.config.sem_seg_3d import cfg cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) purge_cfg(cfg) cfg.freeze() output_dir = cfg.OUTPUT_DIR # replace '@' with config path if output_dir: config_path = osp.splitext(args.config_file)[0] output_dir = output_dir.replace('@', config_path.replace('configs', 'outputs')) if osp.isdir(output_dir): warnings.warn('Output directory exists.') os.makedirs(output_dir, exist_ok=True) # run name timestamp = time.strftime('%m-%d_%H-%M-%S') hostname = socket.gethostname() run_name = '{:s}.{:s}'.format(timestamp, hostname) logger = setup_logger('mvpnet', output_dir, comment='train.{:s}'.format(run_name)) logger.info('{:d} GPUs available'.format(torch.cuda.device_count())) logger.info(args) from common.utils.misc import collect_env_info logger.info('Collecting env info (might take some time)\n' + collect_env_info()) logger.info('Loaded configuration file {:s}'.format(args.config_file)) logger.info('Running with config:\n{}'.format(cfg)) assert cfg.TASK == 'sem_seg_3d' train(cfg, output_dir, run_name) if __name__ == '__main__': main()
38.641791
117
0.579452
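train_3d.py above keeps a "best" validation checkpoint by treating larger metric values as better unless the monitored metric is a loss, in which case smaller is better. The standalone sketch below restates that rule in isolation; it is a simplification written for this note, not code from the repository.

from typing import Optional


# Simplified restatement of the best-checkpoint test in train():
# loss-like metrics improve downward, accuracy/IoU-like metrics improve upward.
def is_new_best(metric_name: str, current: float, best: Optional[float]) -> bool:
    if best is None:
        return True
    if "loss" in metric_name:
        return current < best
    return current > best


assert is_new_best("seg_iou", 0.61, 0.58)
assert is_new_best("loss", 0.42, 0.45)
assert not is_new_best("seg_iou", 0.55, 0.58)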