| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

⌀ marks columns that can contain null values. Each record below lists these fields in this order, separated by `|`, with the file `content` followed by the three derived statistics (`avg_line_length`, `max_line_length`, `alphanum_fraction`) at the end.
48f41747a3d58f3d1a448281f5a63064e6b8c97f | 203 | py | Python | lab02/sum.py | gddmatos/iaed21 | 8bfe153d27b4bc579c4fce94f330ba1b87046f0b | ["MIT"] | 10 | 2021-03-01T12:37:47.000Z | 2022-03-06T14:08:18.000Z | lab02/sum.py | gddmatos/iaed21 | 8bfe153d27b4bc579c4fce94f330ba1b87046f0b | ["MIT"] | null | null | null | lab02/sum.py | gddmatos/iaed21 | 8bfe153d27b4bc579c4fce94f330ba1b87046f0b | ["MIT"] | 16 | 2021-03-19T18:29:29.000Z | 2021-09-17T17:13:19.000Z |
#!/usr/bin/python
# vim: set fileencoding=UTF-8
sum = 0
a = input("? ")
while a.lstrip('-').isdigit() :
sum = sum + int(a)
a = input("? ")
print(sum)
# end with a non-integer value: 1. (for example)
| 20.3 | 48 | 0.596059 |
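The script above keeps summing as long as `a.lstrip('-').isdigit()` holds, so any non-integer token ends the loop. A small illustrative sketch of the same termination rule, with a canned list of values in place of `input()` (the values are invented for the example):

```python
# Illustrative sketch only: the summing loop from lab02/sum.py driven by a fixed list.
inputs = ["3", "-2", "7", "1."]   # "1." is not an integer, so it stops the loop
total = 0
for a in inputs:
    if not a.lstrip('-').isdigit():
        break
    total += int(a)
print(total)  # 8
```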
64e9e4afd52b7e1ef990d4bb82a5fbc7e728fff2 | 93 | py | Python | src/auditlog/__init__.py | alasco-tech/django-auditlog | 36eab26779bc8b8cf0b57d0007b73c7ad8ba8b67 | ["MIT"] | 3 | 2018-03-19T07:21:08.000Z | 2020-07-01T15:23:32.000Z | src/auditlog/__init__.py | alasco-tech/django-auditlog | 36eab26779bc8b8cf0b57d0007b73c7ad8ba8b67 | ["MIT"] | 23 | 2020-02-12T02:35:49.000Z | 2022-02-11T03:45:40.000Z | src/auditlog/__init__.py | alasco-tech/django-auditlog | 36eab26779bc8b8cf0b57d0007b73c7ad8ba8b67 | ["MIT"] | 5 | 2020-04-25T15:04:27.000Z | 2020-08-28T10:47:20.000Z |
from __future__ import unicode_literals
default_app_config = 'auditlog.apps.AuditlogConfig'
| 23.25 | 51 | 0.860215 |
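`default_app_config` tells Django (before 3.2) to load `auditlog.apps.AuditlogConfig` for this app. That `apps.py` module is not part of this record; a minimal sketch of what such a config class usually looks like (the class body here is an assumption, not the package's actual code):

```python
# Hypothetical sketch of auditlog/apps.py; the real file is not shown in this record.
from django.apps import AppConfig


class AuditlogConfig(AppConfig):
    name = "auditlog"
    verbose_name = "Audit log"

    def ready(self):
        # the usual place to register signal handlers once the app registry is ready
        pass
```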
401c005e257d317327e7d2ccc515e1b1c4f9f003 | 11,219 | py | Python | fpakman/view/qt/thread.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | ["Zlib"] | 39 | 2019-06-15T08:27:12.000Z | 2021-11-08T03:33:01.000Z | fpakman/view/qt/thread.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | ["Zlib"] | 10 | 2019-06-16T12:16:19.000Z | 2020-06-21T18:49:05.000Z | fpakman/view/qt/thread.py | vinifmor/fpakman | a719991b8f7ecf366d44fdf074f5950767bdf121 | ["Zlib"] | 3 | 2019-08-01T12:38:46.000Z | 2020-04-30T20:40:23.000Z |
import subprocess
import time
from datetime import datetime, timedelta
from typing import List
import requests
from PyQt5.QtCore import QThread, pyqtSignal
from fpakman.core.controller import ApplicationManager
from fpakman.core.exception import NoInternetException
from fpakman.core.model import ApplicationStatus
from fpakman.core.system import FpakmanProcess
from fpakman.util.cache import Cache
from fpakman.view.qt import dialog
from fpakman.view.qt.view_model import ApplicationView
class AsyncAction(QThread):
def notify_subproc_outputs(self, proc: FpakmanProcess, signal) -> bool:
"""
:param subproc:
:param signal:
:param success:
:return: if the subprocess succeeded
"""
signal.emit(' '.join(proc.subproc.args) + '\n')
success, already_succeeded = True, False
for output in proc.subproc.stdout:
line = output.decode().strip()
if line:
signal.emit(line)
if proc.success_pgrase and proc.success_pgrase in line:
already_succeeded = True
if already_succeeded:
return True
for output in proc.subproc.stderr:
line = output.decode().strip()
if line:
if proc.wrong_error_phrase and proc.wrong_error_phrase in line:
continue
else:
success = False
signal.emit(line)
return success
class UpdateSelectedApps(AsyncAction):
signal_finished = pyqtSignal(bool, int)
signal_status = pyqtSignal(str)
signal_output = pyqtSignal(str)
def __init__(self, manager: ApplicationManager, apps_to_update: List[ApplicationView] = None):
super(UpdateSelectedApps, self).__init__()
self.apps_to_update = apps_to_update
self.manager = manager
def run(self):
success = False
for app in self.apps_to_update:
self.signal_status.emit(app.model.base_data.name)
process = self.manager.update_and_stream(app.model)
success = self.notify_subproc_outputs(process, self.signal_output)
if not success:
break
else:
self.signal_output.emit('\n')
self.signal_finished.emit(success, len(self.apps_to_update))
self.apps_to_update = None
class RefreshApps(QThread):
signal = pyqtSignal(list)
def __init__(self, manager: ApplicationManager):
super(RefreshApps, self).__init__()
self.manager = manager
def run(self):
self.signal.emit(self.manager.read_installed())
class UninstallApp(AsyncAction):
signal_finished = pyqtSignal(object)
signal_output = pyqtSignal(str)
def __init__(self, manager: ApplicationManager, icon_cache: Cache, app: ApplicationView = None):
super(UninstallApp, self).__init__()
self.app = app
self.manager = manager
self.icon_cache = icon_cache
self.root_password = None
def run(self):
if self.app:
process = self.manager.uninstall_and_stream(self.app.model, self.root_password)
success = self.notify_subproc_outputs(process, self.signal_output)
if success:
self.icon_cache.delete(self.app.model.base_data.icon_url)
self.manager.clean_cache_for(self.app.model)
self.signal_finished.emit(self.app if success else None)
self.app = None
self.root_password = None
class DowngradeApp(AsyncAction):
signal_finished = pyqtSignal(bool)
signal_output = pyqtSignal(str)
def __init__(self, manager: ApplicationManager, locale_keys: dict, app: ApplicationView = None):
super(DowngradeApp, self).__init__()
self.manager = manager
self.app = app
self.locale_keys = locale_keys
self.root_password = None
def run(self):
if self.app:
success = False
try:
process = self.manager.downgrade_app(self.app.model, self.root_password)
if process is None:
dialog.show_error(title=self.locale_keys['popup.downgrade.impossible.title'],
body=self.locale_keys['popup.downgrade.impossible.body'])
else:
success = self.notify_subproc_outputs(process, self.signal_output)
except (requests.exceptions.ConnectionError, NoInternetException):
success = False
self.signal_output.emit(self.locale_keys['internet.required'])
finally:
self.app = None
self.root_password = None
self.signal_finished.emit(success)
class GetAppInfo(QThread):
signal_finished = pyqtSignal(dict)
def __init__(self, manager: ApplicationManager, app: ApplicationView = None):
super(GetAppInfo, self).__init__()
self.app = app
self.manager = manager
def run(self):
if self.app:
info = {'__app__': self.app}
info.update(self.manager.get_info(self.app.model))
self.signal_finished.emit(info)
self.app = None
class GetAppHistory(QThread):
signal_finished = pyqtSignal(dict)
def __init__(self, manager: ApplicationManager, locale_keys: dict, app: ApplicationView = None):
super(GetAppHistory, self).__init__()
self.app = app
self.manager = manager
self.locale_keys = locale_keys
def run(self):
if self.app:
try:
res = {'model': self.app.model, 'history': self.manager.get_history(self.app.model)}
self.signal_finished.emit(res)
except (requests.exceptions.ConnectionError, NoInternetException):
self.signal_finished.emit({'error': self.locale_keys['internet.required']})
finally:
self.app = None
class SearchApps(QThread):
signal_finished = pyqtSignal(list)
def __init__(self, manager: ApplicationManager):
super(SearchApps, self).__init__()
self.word = None
self.manager = manager
def run(self):
apps_found = []
if self.word:
res = self.manager.search(self.word)
apps_found.extend(res['installed'])
apps_found.extend(res['new'])
self.signal_finished.emit(apps_found)
self.word = None
class InstallApp(AsyncAction):
signal_finished = pyqtSignal(object)
signal_output = pyqtSignal(str)
def __init__(self, manager: ApplicationManager, disk_cache: bool, icon_cache: Cache, locale_keys: dict, app: ApplicationView = None):
super(InstallApp, self).__init__()
self.app = app
self.manager = manager
self.icon_cache = icon_cache
self.disk_cache = disk_cache
self.locale_keys = locale_keys
self.root_password = None
def run(self):
if self.app:
success = False
try:
process = self.manager.install_and_stream(self.app.model, self.root_password)
success = self.notify_subproc_outputs(process, self.signal_output)
if success and self.disk_cache:
self.app.model.installed = True
if self.app.model.supports_disk_cache():
icon_data = self.icon_cache.get(self.app.model.base_data.icon_url)
self.manager.cache_to_disk(app=self.app.model,
icon_bytes=icon_data.get('bytes') if icon_data else None,
only_icon=False)
except (requests.exceptions.ConnectionError, NoInternetException):
success = False
self.signal_output.emit(self.locale_keys['internet.required'])
finally:
self.signal_finished.emit(self.app if success else None)
self.app = None
class AnimateProgress(QThread):
signal_change = pyqtSignal(int)
def __init__(self):
super(AnimateProgress, self).__init__()
self.progress_value = 0
self.increment = 5
self.stop = False
def run(self):
current_increment = self.increment
while not self.stop:
self.signal_change.emit(self.progress_value)
if self.progress_value == 100:
current_increment = -current_increment
if self.progress_value == 0:
current_increment = self.increment
self.progress_value += current_increment
time.sleep(0.05)
self.progress_value = 0
class VerifyModels(QThread):
signal_updates = pyqtSignal()
def __init__(self, apps: List[ApplicationView] = None):
super(VerifyModels, self).__init__()
self.apps = apps
def run(self):
if self.apps:
stop_at = datetime.utcnow() + timedelta(seconds=30)
last_ready = 0
while True:
current_ready = 0
for app in self.apps:
current_ready += 1 if app.model.status == ApplicationStatus.READY else 0
if current_ready > last_ready:
last_ready = current_ready
self.signal_updates.emit()
if current_ready == len(self.apps):
self.signal_updates.emit()
break
if stop_at <= datetime.utcnow():
break
time.sleep(0.1)
self.apps = None
class RefreshApp(AsyncAction):
signal_finished = pyqtSignal(bool)
signal_output = pyqtSignal(str)
def __init__(self, manager: ApplicationManager, app: ApplicationView = None):
super(RefreshApp, self).__init__()
self.app = app
self.manager = manager
self.root_password = None
def run(self):
if self.app:
success = False
try:
process = self.manager.refresh(self.app.model, self.root_password)
success = self.notify_subproc_outputs(process, self.signal_output)
except (requests.exceptions.ConnectionError, NoInternetException):
success = False
self.signal_output.emit(self.locale_keys['internet.required'])
finally:
self.app = None
self.signal_finished.emit(success)
class FindSuggestions(AsyncAction):
signal_finished = pyqtSignal(list)
def __init__(self, man: ApplicationManager):
super(FindSuggestions, self).__init__()
self.man = man
def run(self):
self.signal_finished.emit(self.man.list_suggestions(limit=-1))
class ListWarnings(QThread):
signal_warnings = pyqtSignal(list)
def __init__(self, man: ApplicationManager, locale_keys: dict):
super(QThread, self).__init__()
self.locale_keys = locale_keys
self.man = man
def run(self):
warnings = self.man.list_warnings()
if warnings:
self.signal_warnings.emit(warnings)
| 30.736986 | 137 | 0.612176 |
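All of the worker classes above follow the same pattern: set the input attributes, connect the `pyqtSignal`s, then call `start()`. A minimal wiring sketch for `UninstallApp` (the `manager`, `icon_cache`, `app_view` and `password` objects are placeholders from the surrounding application, not defined here):

```python
# Hypothetical wiring sketch; the placeholder objects come from the application context.
uninstall = UninstallApp(manager, icon_cache)
uninstall.signal_output.connect(print)                      # stream each subprocess line
uninstall.signal_finished.connect(lambda app: print("ok" if app else "failed"))
uninstall.app = app_view                                    # ApplicationView to remove
uninstall.root_password = password
uninstall.start()                                           # runs UninstallApp.run() in a worker thread
```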
c8f6e362bd7d5e598cffda759314e3ae185bd628 | 277 | py | Python | samples/kitchen-sink-on-aws/glue/hello/hello_world.py | ihafidh/dataops-infra | 7dd73534fac9e7aeb2fea0d546c583aa2ca6e1f3 | ["MIT"] | 12 | 2020-02-29T03:54:54.000Z | 2020-12-03T08:16:04.000Z | samples/kitchen-sink-on-aws/glue/hello/hello_world.py | ihafidh/dataops-infra | 7dd73534fac9e7aeb2fea0d546c583aa2ca6e1f3 | ["MIT"] | 99 | 2019-12-08T19:54:30.000Z | 2020-12-27T01:30:58.000Z | samples/kitchen-sink-on-aws/glue/hello/hello_world.py | ihafidh/dataops-infra | 7dd73534fac9e7aeb2fea0d546c583aa2ca6e1f3 | ["MIT"] | 30 | 2020-02-27T20:58:37.000Z | 2020-10-30T14:13:52.000Z |
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
glueContext = GlueContext(SparkContext.getOrCreate())
print("Hello, World!")
| 25.181818 | 53 | 0.830325 |
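The sample above imports `getResolvedOptions`, `Job` and the transforms but only ever builds a `GlueContext`. As a sketch, a fuller Glue entry point would typically wrap the same code in job bookkeeping like this (the `JOB_NAME` argument is the standard Glue convention, not something defined in this repository):

```python
# Hypothetical fuller variant of the same Glue hello-world entry point.
import sys
from awsglue.utils import getResolvedOptions
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.context import SparkContext

args = getResolvedOptions(sys.argv, ["JOB_NAME"])   # parameters passed in by the Glue runtime
glue_context = GlueContext(SparkContext.getOrCreate())
job = Job(glue_context)
job.init(args["JOB_NAME"], args)                    # enables bookmarks/metrics for the run
print("Hello, World!")
job.commit()                                        # marks the run as finished
```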
966beb973f6ccb93f7fc3e24092c65871311251a | 8,970 | py | Python | planar_magnetics/inductors/cffc.py | dzimmanck/KiCad-CFFC-Inductor | 7d28b9ecfd2d2f64c9412a1fbaeae4d930a760ac | ["Apache-2.0"] | 2 | 2022-03-29T03:18:40.000Z | 2022-03-30T16:48:41.000Z | planar_magnetics/inductors/cffc.py | dzimmanck/KiCad-CFFC-Inductor | 7d28b9ecfd2d2f64c9412a1fbaeae4d930a760ac | ["Apache-2.0"] | 1 | 2022-03-28T22:54:09.000Z | 2022-03-28T22:54:09.000Z | planar_magnetics/inductors/cffc.py | dzimmanck/KiCad-CFFC-Inductor | 7d28b9ecfd2d2f64c9412a1fbaeae4d930a760ac | ["Apache-2.0"] | null | null | null |
import math
from planar_magnetics.geometry import Point
from planar_magnetics.cores import Core
from planar_magnetics.creepage import Classification, calculate_creepage
from planar_magnetics.kicad import Footprint, Pad, PadType, Reference, Value
from planar_magnetics.windings.single import TopTurn, InnerTurn, BottomTurn, ViaStrip
class Winding:
def __init__(
self,
at: Point,
inner_radius: float,
outer_radius: float,
number_layers: int,
gap: float = 0.5,
termination_width: float = None,
viastrip_width: float = 1,
):
self.number_layers = number_layers
if termination_width is None:
termination_width = outer_radius - inner_radius
# calculate other useful angles
inner_gap_angle = math.asin(gap / inner_radius)
term_angle = math.asin(termination_width / outer_radius)
# calculate the angle we can allocate to the via transitions
circumfrance_for_transitions = (
2 * math.pi - term_angle
) * inner_radius - number_layers * gap
angle_for_transitions = circumfrance_for_transitions / inner_radius
viastrip_angle = angle_for_transitions / (number_layers - 1)
# calculate other useful angles
inner_gap_angle = math.asin(gap / inner_radius)
term_angle = math.asin(termination_width / outer_radius)
# calculate the required rotation per turn
initial_rotation = (term_angle + inner_gap_angle) / 2
rotation_per_turn = viastrip_angle + inner_gap_angle
# create the top and bottom turns
top = TopTurn(
at,
inner_radius,
outer_radius,
gap,
termination_width,
viastrip_angle,
viastrip_width,
"F.Cu",
)
inners = [
InnerTurn(
at,
inner_radius,
outer_radius,
gap,
-n * rotation_per_turn - initial_rotation,
viastrip_angle,
viastrip_width,
f"In{n}.Cu",
)
for n in range(1, number_layers - 1)
]
bottom = BottomTurn(
at,
inner_radius,
outer_radius,
gap,
termination_width,
viastrip_angle,
viastrip_width,
"B.Cu",
)
self.turns = [top] + inners + [bottom]
# create the via strips
initial_angle = initial_rotation + inner_gap_angle / 2
layers = [(t.layer, b.layer) for t, b in zip(self.turns[0:-1], self.turns[1:])]
self.viastrips = [
ViaStrip(
at,
layers[n],
inner_radius,
-initial_angle - n * rotation_per_turn,
-initial_angle - n * rotation_per_turn - viastrip_angle,
0.8,
0.4,
)
for n in range(number_layers - 1)
]
def estimate_dcr(self, thicknesses: [float], rho: float = 1.68e-8):
"""Estimate the DC resistance of the winding
This function will estimate the DC resistance of the winding by calculating the estimated
dc resistance of each turn and adding the estimated inter-turn via resistance
Args:
thicknesses: The thickness of each layer in the winding
rho (float): The conductivity of the material used in the layer
Returns:
float: An estimation of the DC resistance in ohms
"""
assert (
len(thicknesses) == self.number_layers
), f"You need to specify 1 thickness for each layer, so len(thicknesses) should be {self.number_layers}, not{len(thicknesses)}"
resistance = 0
for thickness, turn in zip(thicknesses, self.turns):
resistance += turn.estimate_dcr(thickness, rho)
return resistance
def __str__(self):
turns = "\n".join(turn.__str__() for turn in self.turns)
vias = "\n".join(viastrip.__str__() for viastrip in self.viastrips)
expression = turns + vias
return expression
class Cffc:
def __init__(
self,
inner_radius: float,
outer_radius: float,
number_turns: int,
voltage: float,
classification: Classification = Classification.B4,
termination_width: float = None,
):
origin = Point(0, 0)
self.inner_radius = inner_radius
self.outer_radius = outer_radius
# calculate the required creepage distance
creapage = calculate_creepage(voltage, classification)
if termination_width is None:
self.termination_width = outer_radius - inner_radius
else:
self.termination_width = termination_width
self.number_turns = number_turns
self.number_layers = number_turns + 1
# create the windings
self.winding = Winding(
at=origin,
inner_radius=inner_radius,
outer_radius=outer_radius,
number_layers=self.number_layers,
gap=creapage,
termination_width=self.termination_width,
)
# create the core
edge_to_trace = 0.635
edge_to_core = 0.5
self.core = Core(
centerpost_radius=inner_radius - edge_to_trace - edge_to_core,
window_width=(outer_radius - inner_radius)
+ 2 * (edge_to_core + edge_to_trace),
window_height=6,
opening_width=self.termination_width + 2 * (edge_to_core + edge_to_trace),
gap=1,
)
def __str__(self):
cutouts = self.core.create_pcb_cutouts(Point(0, 0), 0.5)
windings_expr = self.winding.__str__()
cutouts_expr = "\n".join([cutout.__str__() for cutout in cutouts])
expression = self.winding.__str__() + self.core.__str__()
return expression
def estimate_dcr(self, thicknesses: [float], rho: float = 1.68e-8):
"""Estimate the DC resistance of the winding
This function will estimate the DC resistance of the winding by calculating the estimated
dc resistance of each turn and adding the estimated inter-turn via resistance
Args:
thicknesses: The thickness of each layer in the winding
rho (float): The conductivity of the material used in the layer
Returns:
float: An estimation of the DC resistance in ohms
"""
return self.winding.estimate_dcr(thicknesses, rho)
def to_kicad_footprint(self, name: str):
"""Export the Cffc inductor design as a KiCAD footprint file (*.kicad_mods)
"""
# vias are not allowed in KiCAD footprints, so convert the via strips to through-hole pads
pads = [
via.to_pad() for viastrip in self.winding.viastrips for via in viastrip.vias
]
# add the termination pads
location = Point(self.outer_radius + self.termination_width / 2, 0)
size = self.termination_width / 2
pads.extend(
[
Pad(PadType.SMD, 1, location, size, ("F.Cu",)),
Pad(PadType.SMD, 2, location, size, ("B.Cu",)),
]
)
# add the reference and value silkscreens
x_loc = self.core.width / 2 + 1
height_avail = (self.core.width - self.termination_width) / 2
font_size = min(2, height_avail / 4)
val_loc = Point(x_loc, self.termination_width / 2 + height_avail / 3)
ref_loc = Point(x_loc, self.termination_width / 2 + 2 * height_avail / 3)
reference = Reference(ref_loc, font_size)
value = Value(val_loc, font_size)
# create a footprint from the various elements
contents = (
self.core.create_pcb_cutouts()
+ self.winding.turns
+ pads
+ [reference, value]
)
footprint = Footprint(name, contents=contents)
# write the footprint to a file
fh = open(f"{name}.kicad_mod", "w")
fh.write(footprint.__str__())
fh.close()
if __name__ == "__main__":
from planar_magnetics.utils import weight_to_thickness
inductor = Cffc(inner_radius=4.9, outer_radius=9, number_turns=3, voltage=500)
# estimate the dc resistance of this inductor
    # using the CFFC structure, a 3 turn inductor requires 4 layers
    # assume we are using 1.5 oz on top/bottom and 2 oz on interior layers
thicknesses = [
weight_to_thickness(1.5),
weight_to_thickness(2),
weight_to_thickness(2),
weight_to_thickness(1.5),
]
dcr = inductor.estimate_dcr(thicknesses)
print(f"Estimated DCR of this inductor is {dcr*1e3} mOhms")
# create a complete KiCAD footprint
inductor.to_kicad_footprint("cffc_inductor")
inductor.core.to_step("core.step")
| 33.595506 | 135 | 0.604794 |
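The `__main__` block depends on `planar_magnetics.utils.weight_to_thickness`, which is not included in this record. A rough stand-in based on the common rule of thumb that 1 oz/ft² of copper is about 35 µm (0.035 mm) thick; the real helper may use different units or constants:

```python
# Rough stand-in for planar_magnetics.utils.weight_to_thickness (assumption, not the real code).
MM_PER_OZ = 0.0347  # 1 oz/ft^2 of plated copper is roughly 34.7 um = 0.0347 mm thick


def weight_to_thickness(weight_oz: float) -> float:
    """Convert a copper weight in oz/ft^2 to a thickness in millimeters."""
    return weight_oz * MM_PER_OZ


print(weight_to_thickness(1.5))  # about 0.052 mm for the outer layers
```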
676797d592ca0eb3c176d59c6dd949d9d70c7f70 | 1,327 | py | Python | lucid/optvis/param/images.py | gabgoh/lucid | 643844807a41ac3bd9b972cdfb0a3f793c9c2d11 | ["Apache-2.0"] | 18 | 2019-02-04T20:57:37.000Z | 2021-03-30T17:05:21.000Z | lucid/optvis/param/images.py | gabgoh/lucid | 643844807a41ac3bd9b972cdfb0a3f793c9c2d11 | ["Apache-2.0"] | 2 | 2021-09-19T06:54:17.000Z | 2022-01-23T02:49:06.000Z | lucid/optvis/param/images.py | gabgoh/lucid | 643844807a41ac3bd9b972cdfb0a3f793c9c2d11 | ["Apache-2.0"] | 16 | 2019-02-11T22:05:23.000Z | 2021-09-19T06:53:42.000Z |
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High-level wrapper for paramaterizing images."""
import tensorflow as tf
from lucid.optvis.param.color import to_valid_rgb
from lucid.optvis.param.spatial import pixel_image, fft_image
def image(w, h=None, batch=None, sd=None, decorrelate=True, fft=True, alpha=False):
h = h or w
batch = batch or 1
channels = 4 if alpha else 3
shape = [batch, w, h, channels]
param_f = fft_image if fft else pixel_image
t = param_f(shape, sd=sd)
rgb = to_valid_rgb(t[..., :3], decorrelate=decorrelate, sigmoid=True)
if alpha:
a = tf.nn.sigmoid(t[..., 3:])
return tf.concat([rgb, a], -1)
return rgb
| 35.864865 | 83 | 0.669179 |
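`image()` is the parameterization that lucid's rendering loop optimizes. A usage sketch with the modelzoo (the model and the `mixed4a_pre_relu:476` objective are common examples from the lucid tutorials, not taken from this file, so treat the exact names as assumptions):

```python
# Hypothetical usage sketch; assumes lucid's modelzoo weights can be downloaded.
from lucid.modelzoo import vision_models
from lucid.optvis import render
from lucid.optvis.param.images import image

model = vision_models.InceptionV1()
model.load_graphdef()

param_f = lambda: image(128, fft=True, decorrelate=True)  # FFT + decorrelated colour space
images = render.render_vis(model, "mixed4a_pre_relu:476", param_f=param_f)
```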
cfaaf991002f1282cfc68c7e14782069f3696b42 | 7,661 | py | Python | apex/optimizers/fused_adam.py | oyj0594/apex | b66ffc1d952d0b20d6706ada783ae5b23e4ee734 | ["BSD-3-Clause"] | 23 | 2019-01-14T09:45:28.000Z | 2021-05-22T02:25:41.000Z | apex/optimizers/fused_adam.py | oyj0594/apex | b66ffc1d952d0b20d6706ada783ae5b23e4ee734 | ["BSD-3-Clause"] | 4 | 2021-06-08T21:14:36.000Z | 2022-03-12T00:23:24.000Z | apex/optimizers/fused_adam.py | oyj0594/apex | b66ffc1d952d0b20d6706ada783ae5b23e4ee734 | ["BSD-3-Clause"] | 12 | 2019-05-22T10:13:55.000Z | 2022-01-05T05:20:23.000Z |
import torch
from apex.multi_tensor_apply import multi_tensor_applier
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm.
Currently GPU-only. Requires Apex to be installed via
``pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./``.
This version of fused Adam implements 2 fusions.
* Fusion of the Adam update's elementwise operations
* A multi-tensor apply launch that batches the elementwise updates applied to all the model's parameters into one or a few kernel launches.
:class:`apex.optimizers.FusedAdam` may be used as a drop-in replacement for ``torch.optim.Adam``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
...
opt.step()
:class:`apex.optimizers.FusedAdam` may be used with or without Amp. If you wish to use :class:`FusedAdam` with Amp,
you may choose any ``opt_level``::
opt = apex.optimizers.FusedAdam(model.parameters(), lr = ....)
model, opt = amp.initialize(model, opt, opt_level="O0" or "O1 or "O2")
...
opt.step()
In general, ``opt_level="O1"`` is recommended.
.. warning::
A previous version of :class:`FusedAdam` allowed a number of additional arguments to ``step``. These additional arguments
are now deprecated and unnecessary.
    Adam was first proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
adam_w_mode (boolean, optional): Apply L2 regularization or weight decay
True for decoupled weight decay(also known as AdamW) (default: True)
set_grad_none (bool, optional): whether set grad to None when zero_grad()
method is called. (default: True)
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, adam_w_mode=True,
weight_decay=0., amsgrad=False, set_grad_none=True):
if amsgrad:
raise RuntimeError('FusedAdam does not support the AMSGrad variant.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay)
super(FusedAdam, self).__init__(params, defaults)
self.adam_w_mode = 1 if adam_w_mode else 0
self.set_grad_none = set_grad_none
if multi_tensor_applier.available:
import amp_C
# Skip buffer
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
self.multi_tensor_adam = amp_C.multi_tensor_adam
else:
raise RuntimeError('apex.optimizers.FusedAdam requires cuda extensions')
def zero_grad(self):
if self.set_grad_none:
for group in self.param_groups:
for p in group['params']:
p.grad = None
else:
super(FusedAdam, self).zero_grad()
def step(self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
The remaining arguments are deprecated, and are only retained (for the moment) for error-checking purposes.
"""
if any(p is not None for p in [grads, output_params, scale, grad_norms]):
raise RuntimeError('FusedAdam has been updated. Simply initialize it identically to torch.optim.Adam, and call step() with no arguments.')
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
bias_correction = 1 if group['bias_correction'] else 0
beta1, beta2 = group['betas']
# assume same step across group now to simplify things
# per parameter step can be easily support by making it tensor, or pass list into kernel
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
# create lists for multi-tensor apply
g_16, p_16, m_16, v_16 = [], [], [], []
g_32, p_32, m_32, v_32 = [], [], [], []
for p in group['params']:
if p.grad is None:
continue
if p.grad.data.is_sparse:
raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if p.dtype == torch.float16:
g_16.append(p.grad.data)
p_16.append(p.data)
m_16.append(state['exp_avg'])
v_16.append(state['exp_avg_sq'])
elif p.dtype == torch.float32:
g_32.append(p.grad.data)
p_32.append(p.data)
m_32.append(state['exp_avg'])
v_32.append(state['exp_avg_sq'])
else:
raise RuntimeError('FusedAdam only support fp16 and fp32.')
if(len(g_16) > 0):
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_16, p_16, m_16, v_16],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
if(len(g_32) > 0):
multi_tensor_applier(self.multi_tensor_adam,
self._dummy_overflow_buf,
[g_32, p_32, m_32, v_32],
group['lr'],
beta1,
beta2,
group['eps'],
group['step'],
self.adam_w_mode,
bias_correction,
group['weight_decay'])
return loss
| 44.283237 | 151 | 0.551364 |
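Following the usage pattern in the docstring, a minimal training step with `FusedAdam` might look like the sketch below (the model and batch are stand-ins, and a CUDA build of apex is assumed):

```python
# Hypothetical usage sketch; FusedAdam is GPU-only, so a CUDA device is assumed.
import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(128, 10).cuda()
opt = FusedAdam(model.parameters(), lr=1e-3, weight_decay=1e-2)

x = torch.randn(32, 128, device="cuda")
target = torch.randint(0, 10, (32,), device="cuda")

opt.zero_grad()
loss = torch.nn.functional.cross_entropy(model(x), target)
loss.backward()
opt.step()
```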
2a1706a3aeaa5c3b4efed883275cc6f967dcddce | 6,392 | py | Python | train.py | alissonferreirasv/tacotron | d48a535453b116cc43c1b6bdf8f21b0554a3650b | ["MIT"] | null | null | null | train.py | alissonferreirasv/tacotron | d48a535453b116cc43c1b6bdf8f21b0554a3650b | ["MIT"] | null | null | null | train.py | alissonferreirasv/tacotron | d48a535453b116cc43c1b6bdf8f21b0554a3650b | ["MIT"] | null | null | null |
import argparse
from datetime import datetime
import math
import os
import subprocess
import time
import tensorflow as tf
import traceback
from datasets.datafeeder import DataFeeder
from hparams import hparams, hparams_debug_string
from models import create_model
from text import sequence_to_text
from util import audio, infolog, plot, ValueWindow
log = infolog.log
def get_git_commit():
subprocess.check_output(['git', 'diff-index', '--quiet', 'HEAD']) # Verify client is clean
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()[:10]
log('Git commit: %s' % commit)
return commit
def add_stats(model):
with tf.variable_scope('stats') as scope:
tf.summary.histogram('linear_outputs', model.linear_outputs)
tf.summary.histogram('linear_targets', model.linear_targets)
tf.summary.histogram('mel_outputs', model.mel_outputs)
tf.summary.histogram('mel_targets', model.mel_targets)
tf.summary.scalar('loss_mel', model.mel_loss)
tf.summary.scalar('loss_linear', model.linear_loss)
tf.summary.scalar('learning_rate', model.learning_rate)
tf.summary.scalar('loss', model.loss)
gradient_norms = [tf.norm(grad) for grad in model.gradients]
tf.summary.histogram('gradient_norm', gradient_norms)
tf.summary.scalar('max_gradient_norm', tf.reduce_max(gradient_norms))
return tf.summary.merge_all()
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def train(log_dir, args):
commit = get_git_commit() if args.git else 'None'
checkpoint_path = os.path.join(log_dir, 'model.ckpt')
input_path = os.path.join(args.base_dir, args.input)
log('Checkpoint path: %s' % checkpoint_path)
log('Loading training data from: %s' % input_path)
log('Using model: %s' % args.model)
log(hparams_debug_string())
# Set up DataFeeder:
coord = tf.train.Coordinator()
with tf.variable_scope('datafeeder') as scope:
feeder = DataFeeder(coord, input_path, hparams)
# Set up model:
global_step = tf.Variable(0, name='global_step', trainable=False)
with tf.variable_scope('model') as scope:
model = create_model(args.model, hparams)
model.initialize(feeder.inputs, feeder.input_lengths, feeder.mel_targets, feeder.linear_targets)
model.add_loss()
model.add_optimizer(global_step)
stats = add_stats(model)
# Bookkeeping:
step = 0
time_window = ValueWindow(100)
loss_window = ValueWindow(100)
saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=2)
# Train!
with tf.Session() as sess:
try:
summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
sess.run(tf.global_variables_initializer())
if args.restore_step:
# Restore from a checkpoint if the user requested it.
restore_path = '%s-%d' % (checkpoint_path, args.restore_step)
saver.restore(sess, restore_path)
log('Resuming from checkpoint: %s at commit: %s' % (restore_path, commit), slack=True)
else:
log('Starting new training run at commit: %s' % commit, slack=True)
feeder.start_in_session(sess)
while not coord.should_stop():
start_time = time.time()
step, loss, opt = sess.run([global_step, model.loss, model.optimize])
time_window.append(time.time() - start_time)
loss_window.append(loss)
message = 'Step %-7d [%.03f sec/step, loss=%.05f, avg_loss=%.05f]' % (
step, time_window.average, loss, loss_window.average)
log(message, slack=(step % args.checkpoint_interval == 0))
if loss > 100 or math.isnan(loss):
log('Loss exploded to %.05f at step %d!' % (loss, step), slack=True)
raise Exception('Loss Exploded')
if step % args.summary_interval == 0:
log('Writing summary at step: %d' % step)
summary_writer.add_summary(sess.run(stats), step)
if step % args.checkpoint_interval == 0:
log('Saving checkpoint to: %s-%d' % (checkpoint_path, step))
saver.save(sess, checkpoint_path, global_step=step)
log('Saving audio and alignment...')
input_seq, spectrogram, alignment = sess.run([
model.inputs[0], model.linear_outputs[0], model.alignments[0]])
waveform = audio.inv_spectrogram(spectrogram.T)
audio.save_wav(waveform, os.path.join(log_dir, 'step-%d-audio.wav' % step))
plot.plot_alignment(alignment, os.path.join(log_dir, 'step-%d-align.png' % step),
info='%s, %s, %s, step=%d, loss=%.5f' % (args.model, commit, time_string(), step, loss))
log('Input: %s' % sequence_to_text(input_seq))
except Exception as e:
log('Exiting due to exception: %s' % e, slack=True)
traceback.print_exc()
coord.request_stop(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--base_dir', default=os.path.expanduser('~/tacotron'))
parser.add_argument('--input', default='training/train.txt')
parser.add_argument('--model', default='tacotron')
parser.add_argument('--name', help='Name of the run. Used for logging. Defaults to model name.')
parser.add_argument('--hparams', default='',
help='Hyperparameter overrides as a comma-separated list of name=value pairs')
parser.add_argument('--restore_step', type=int, help='Global step to restore from checkpoint.')
parser.add_argument('--summary_interval', type=int, default=100,
help='Steps between running summary ops.')
parser.add_argument('--checkpoint_interval', type=int, default=1000,
help='Steps between writing checkpoints.')
parser.add_argument('--slack_url', help='Slack webhook URL to get periodic reports.')
parser.add_argument('--tf_log_level', type=int, default=1, help='Tensorflow C++ log level.')
parser.add_argument('--git', action='store_true', help='If set, verify that the client is clean.')
args = parser.parse_args()
os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.tf_log_level)
run_name = args.name or args.model
log_dir = os.path.join(args.base_dir, 'logs-%s' % run_name)
os.makedirs(log_dir, exist_ok=True)
infolog.init(os.path.join(log_dir, 'train.log'), run_name, args.slack_url)
hparams.parse(args.hparams)
train(log_dir, args)
if __name__ == '__main__':
  main()
 | 42.052632 | 101 | 0.679287 |
0ac7027da226bdebf0132607a63282c36be8c848 | 2,130 | py | Python | updater.py | krets/dns-updater | 3ef360be25b83edef549d48750d5d7bfbbd04bdb | ["MIT"] | null | null | null | updater.py | krets/dns-updater | 3ef360be25b83edef549d48750d5d7bfbbd04bdb | ["MIT"] | null | null | null | updater.py | krets/dns-updater | 3ef360be25b83edef549d48750d5d7bfbbd04bdb | ["MIT"] | null | null | null |
""" Update a godaddy DNS entry for a dynamic IP
My shared hosting service keeps migrating my account to a new IP.
This is probably necessary for them to lower their operational costs,
but it sucks for me. I do not use their DNS, so I must manually update
my DNS provider which each change.
This script removes the need for me to pay much attention. I may just
run it daily to keep up-to-date.
[email protected]
"""
import logging
import json
import dns.resolver
from godaddypy import Client, Account
LOG = logging.getLogger('krets.dns')
def _config():
with open("config.json", 'r') as fh:
return json.load(fh)
class Resolver(dns.resolver.Resolver):
def address(self, name):
""" Convenience method to shorten interaction """
return self.query(name).response.answer[0].items[0].address
def main():
""" Find IPs from web host DNS and update godaddy DNS.
"""
config = _config()
resolver = Resolver()
resolver.nameservers = config['initial_nameservers']
LOG.debug("Resolving namdservers %s", config['nameservers'])
nameservers = [resolver.address(_) for _ in config['nameservers']]
resolver.nameservers = nameservers
addresses = {}
for domain in config['domains']:
addresses[domain] = resolver.address(domain)
LOG.debug("Found addresses: %s", addresses)
account = Account(**config['credentials'])
client = Client(account)
domains = client.get_domains()
for domain, address in addresses.items():
if domain not in domains:
raise ValueError("%s not in client list of domains" % domain)
current = client.get_records(domain)[0]['data']
if current != address:
LOG.info('updating %s (%s -> %s)', domain, current, address)
client.update_record_ip(address, domain, '@', 'A')
else:
LOG.info('Record up-to-date %s (%s)', domain, address)
LOG.debug("complete")
if __name__ == '__main__':
LOG.addHandler(logging.StreamHandler())
LOG.handlers[0].setFormatter(logging.Formatter(logging.BASIC_FORMAT))
LOG.setLevel(logging.DEBUG)
main()
| 30.869565 | 73 | 0.675117 |
7b6015f48829a8586ee025603d5ac386645b96f5 | 394 | py | Python | subjunctive/resource.py | kylelin47/subjunctive | 66c2bf0eae41e597bb2cc42f70cf018da78e2e6a | ["MIT"] | 1 | 2015-04-20T12:11:22.000Z | 2015-04-20T12:11:22.000Z | subjunctive/resource.py | kylelin47/subjunctive | 66c2bf0eae41e597bb2cc42f70cf018da78e2e6a | ["MIT"] | null | null | null | subjunctive/resource.py | kylelin47/subjunctive | 66c2bf0eae41e597bb2cc42f70cf018da78e2e6a | ["MIT"] | null | null | null |
import os.path
import sdl2.ext
_paths = [os.path.dirname(__file__)]
def add_path(path):
_paths.append(path)
def image(name):
for path in _paths:
try:
surface = sdl2.ext.load_image(os.path.join(path, name))
except sdl2.ext.common.SDLError:
pass
else:
return surface
raise KeyError("image %r could not be found" % name)
| 20.736842 | 67 | 0.611675 |
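A short usage sketch of the two helpers above (the directory and file name are placeholders):

```python
# Hypothetical usage; "assets" and "player.png" are placeholder names.
from subjunctive import resource

resource.add_path("assets")              # also search ./assets for images
surface = resource.image("player.png")   # sdl2.ext surface, or KeyError if not found
```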
65d8f597da62ed0c278fa2ab3252f0efab6c5376 | 16,329 | py | Python | src/test.py | CN-TU/PCC-Uspace | 6e3f6d696f9378688389ef74d106e4d0a8039d2d | ["BSD-3-Clause"] | 3 | 2020-10-21T12:39:25.000Z | 2021-08-07T16:06:10.000Z | src/test.py | CN-TU/PCC-Uspace | 6e3f6d696f9378688389ef74d106e4d0a8039d2d | ["BSD-3-Clause"] | null | null | null | src/test.py | CN-TU/PCC-Uspace | 6e3f6d696f9378688389ef74d106e4d0a8039d2d | ["BSD-3-Clause"] | null | null | null |
"""Example file for testing
This creates a small testnet with ipaddresses from 192.168.0.0/24,
one switch, and three hosts.
"""
import sys, os
import io
import time
import math
import signal
import numpy as np
import fnmatch
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
try:
del os.environ["ONLY_ONE_FLOW"]
except KeyError:
pass
import subprocess
import virtnet
import statistics
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--bytes_to_capture', type=int, default=100)
parser.add_argument('--delay', type=int, default=100)
parser.add_argument('--rate', type=float, default=8)
parser.add_argument('--time', type=float, default=10)
parser.add_argument('--qdisc', type=str, default="fq")
parser.add_argument('--cport', type=int, default=9000)
parser.add_argument('--buffer_size', type=int, default=10)
parser.add_argument('--how_many_values_per_parameter', type=int, default=5)
parser.add_argument('--run_scenario', type=str, default="")
parser.add_argument('--store_pcaps', action='store_true')
parser.add_argument('--competing_flow', action='store_true')
parser.add_argument('--two_iperfs', action='store_true')
parser.add_argument('--only_iperf', action='store_true')
opt = parser.parse_args()
print(opt)
def run_commands(cmds, Popen=False):
if type(cmds) is not list:
cmds = [cmds]
return_stuff = []
for cmd in cmds:
if type(cmd) is tuple:
cmd, kwargs = cmd
else:
kwargs = {}
try:
print("cmd", cmd)#, "kwargs", kwargs)
if not Popen:
output = subprocess.run(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, **kwargs)
# print("output", output)
return_stuff.append(output)
else:
popen = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
return_stuff.append(popen)
except subprocess.CalledProcessError as e:
print(e.cmd, e.returncode, e.output)
raise e
return return_stuff
# print("os.environ", os.environ)
def execute_popen_and_show_result(command, host=None):
parent = host if host is not None else subprocess
print(f"Executing{f' on host {host.name}' if host else ''}", command)
with parent.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as cmd:
out, err = cmd.stdout.read(), cmd.stderr.read()
if out:
print("out", out.decode("utf-8"))
if err:
print("err", err.decode("utf-8"))
number_of_seconds_the_competing_flow_starts_earlier = 5
def run(vnet, prefix=""):
start_time = int(time.time() * 1000)
"Main functionality"
# print("Calculating pdf...")
# x = np.linspace(-X, X, SAMPLES)
# y = norm.pdf(x, loc=-5)+norm.pdf(x, loc=5, scale=3)
# area = np.trapz(y)*(2*X)/SAMPLES
print("Building network...")
network = vnet.Network("192.168.0.0/24")
switch = vnet.Switch("sw")
hosts = []
for i in range(2):
host = vnet.Host("host{}".format(i))
host.connect(vnet.VirtualLink, switch, "eth0")
# print("switch.interfaces", switch.interfaces)
host["eth0"].add_ip(network)
execute_popen_and_show_result("ethtool -K eth0 gro off", host)
execute_popen_and_show_result("ethtool -K eth0 gso off", host)
execute_popen_and_show_result("ethtool -K eth0 tso off", host)
hosts.append(host)
# print("host", host)
# hosts[0]["eth0"].tc('add', 'netem', delay=DELAY, jitter=SIGMA, dist=y)
# import pdb; pdb.set_trace()
# print("switch.interfaces", switch.interfaces)
for interface in switch.interfaces:
print("interface", interface)
# continue
execute_popen_and_show_result(f"ethtool -K {interface} gro off")
execute_popen_and_show_result(f"ethtool -K {interface} gso off")
execute_popen_and_show_result(f"ethtool -K {interface} tso off")
run_commands([f"tc qdisc add dev {interface} root handle 1: netem{f' delay {int(round(opt.delay/2))}ms'}", f"tc qdisc add dev {interface} parent 1: handle 2: htb default 21", f"tc class add dev {interface} parent 2: classid 2:21 htb rate {opt.rate if interface=='host10' else 100}mbit", f"tc qdisc add dev {interface} parent 2:21 handle 3: {opt.qdisc if interface=='host10' else 'fq'}{f' flow_limit {int(math.ceil(opt.buffer_size))}' if (interface=='host10' and opt.qdisc=='fq') else ''}{f' limit {int(math.ceil(opt.buffer_size))}' if (interface=='host10' and opt.qdisc=='pfifo') else ''}"])
vnet.update_hosts()
for i in range(len(hosts)):
with hosts[i].Popen("tc qdisc show dev eth0".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as qdisc_info:
qdisc_info_output = qdisc_info.stdout.read().decode("utf-8").split("\n")
print(f"qdisc_info_output host {i}", qdisc_info_output)
with hosts[0].Popen("ping -c 100 -i 0 host1".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as ping:
ping_output = ping.stdout.read().decode("utf-8").split("\n")
ping_output = [float(item.split()[-2][5:]) for item in ping_output if "time=" in item]
mean_rtt = statistics.mean(ping_output)
print("mean rtt", mean_rtt)
assert mean_rtt >= opt.delay, f"mean_rtt: {mean_rtt}, opt.delay: {opt.delay}"
protocol_for_main_flow = "tcp"
if not opt.only_iperf:
if not opt.two_iperfs:
protocol_for_main_flow = "udp"
server_popen = hosts[1].Popen(f"./app/pccserver recv {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
server_popen = hosts[1].Popen("iperf3 -V -4 -s -p 5211".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if opt.competing_flow:
server_popen_iperf = hosts[1].Popen("iperf3 -V -4 -s".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
os.environ["file_name_for_logging"] = f"pcaps/{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.txt"
if opt.store_pcaps:
os.makedirs("pcaps", exist_ok=True)
tcpdump_sender_popens = []
tcpdump_receiver_popens = []
if not opt.only_iperf:
tcpdump_sender_popens.append(hosts[0].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/sender_{prefix}_{protocol_for_main_flow}_port{opt.cport}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.buffer_size}_{opt.time}_{start_time}.pcap dst port {opt.cport} or src port {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
tcpdump_receiver_popens.append(hosts[1].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/receiver_{prefix}_{protocol_for_main_flow}_port{opt.cport}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.buffer_size}_{opt.time}_{start_time}.pcap dst port {opt.cport} or src port {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if opt.competing_flow:
tcpdump_sender_popens.append(hosts[0].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/sender_{prefix}_tcp_port{opt.cport+10}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.pcap tcp and dst port {opt.cport+10} or src port {opt.cport+10}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
tcpdump_receiver_popens.append(hosts[1].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/receiver_{prefix}_tcp_port{opt.cport+10}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.pcap tcp and dst port {opt.cport+10} or src port {opt.cport+10}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if opt.competing_flow:
client_popen_iperf = hosts[0].Popen(f"iperf3 -V -4 -t {opt.time+number_of_seconds_the_competing_flow_starts_earlier} --cport {opt.cport+10} -c host1".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(number_of_seconds_the_competing_flow_starts_earlier)
if not opt.only_iperf:
if not opt.two_iperfs:
client_popen = hosts[0].Popen(f"./app/pccclient send host1 {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# client_popen = hosts[0].Popen(f"./app/pccclient send host1 {opt.cport}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# client_popen = hosts[0].Popen(f"gdb --args ./app/pccclient send host1 {opt.cport}", shell=True)
else:
client_popen = hosts[0].Popen(f"iperf3 -V -4 -t {opt.time+number_of_seconds_the_competing_flow_starts_earlier} -p 5211 --cport {opt.cport} -c host1".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# client_popen.communicate()
time.sleep(opt.time)
if not opt.only_iperf:
# print("Terminating")
print("returncode before", client_popen.returncode)
client_popen.terminate()
print("returncode after", client_popen.returncode)
# import pdb; pdb.set_trace()
out, err = client_popen.stdout.read(), client_popen.stderr.read()
if out:
print("client out", out.decode("utf-8"))
if err:
print("client err", err.decode("utf-8"))
client_out = out
if opt.competing_flow:
client_popen_iperf.terminate()
out, err = client_popen_iperf.stdout.read(), client_popen_iperf.stderr.read()
if out:
print("client iperf out", out.decode("utf-8"))
if err:
print("client iperf err", err.decode("utf-8"))
else:
client_out = b""
if not opt.only_iperf:
server_popen.terminate()
out, err = server_popen.stdout.read(), server_popen.stderr.read()
if out:
print("server out", out.decode("utf-8"))
if err:
print("server err", err.decode("utf-8"))
if opt.competing_flow:
server_popen_iperf.terminate()
out, err = server_popen_iperf.stdout.read(), server_popen_iperf.stderr.read()
if out:
print("server iperf out", out.decode("utf-8"))
if err:
print("server iperf err", err.decode("utf-8"))
if opt.store_pcaps:
for tcpdump_sender_popen in tcpdump_sender_popens:
tcpdump_sender_popen.terminate()
out, err = tcpdump_sender_popen.stdout.read(), tcpdump_sender_popen.stderr.read()
if out:
print("tcpdump out", out.decode("utf-8"))
if err:
print("tcpdump err", err.decode("utf-8"))
for tcpdump_receiver_popen in tcpdump_receiver_popens:
tcpdump_receiver_popen.terminate()
out, err = tcpdump_receiver_popen.stdout.read(), tcpdump_receiver_popen.stderr.read()
if out:
print("tcpdump out", out.decode("utf-8"))
if err:
print("tcpdump err", err.decode("utf-8"))
subprocess.check_output("chmod -R o+rw pcaps".split())
return client_out.decode("utf-8"), start_time
if opt.run_scenario == "":
with virtnet.Manager() as context:
run(context)
elif opt.run_scenario == "accuracy":
import sklearn.metrics
results_dict = {}
for bw_index, bw in enumerate(np.linspace(5,50,opt.how_many_values_per_parameter)):
for delay_index, delay in enumerate(np.linspace(10,100,opt.how_many_values_per_parameter)):
for buffer_index, buffer in enumerate(np.linspace(1,100,opt.how_many_values_per_parameter)):
for fq_index, fq in enumerate([False, True]):
opt.rate = int(round(bw))
opt.delay = int(round(delay))
opt.buffer_size = int(round(buffer))
opt.qdisc = "fq" if fq else "pfifo"
opt.time = 10
with virtnet.Manager() as context:
client_output, timestamp = run(context, "accuracy")
assert client_output != ""
contained_vegas = "Starting Vegas" in client_output
contained_pcc = "Starting PCC Classic" in client_output
results_dict[(bw, delay, buffer, fq)] = (contained_vegas, contained_pcc)
invalids = []
false_predictions = []
predictions = []
for (bw, delay, buffer, fq), (is_vegas, is_pcc) in results_dict.items():
is_invalid = (not is_vegas and not is_pcc)
if is_invalid:
invalids.append(((bw, delay, buffer, fq), (is_vegas, is_pcc)))
if not is_invalid:
predictions.append((fq, is_vegas))
if fq != is_vegas:
false_predictions.append(((bw, delay, buffer, fq), is_vegas))
print("invalids", len(invalids), "total", len(results_dict))
print("invalids", invalids)
confusion_matrix_input = list(zip(*predictions))
accuracy_score = sklearn.metrics.accuracy_score(*confusion_matrix_input)
print("accuracy_score", accuracy_score)
confusion_matrix = sklearn.metrics.confusion_matrix(*confusion_matrix_input)
print("confusion_matrix", confusion_matrix)
print("false_predictions", false_predictions)
elif opt.run_scenario == "evaluation":
results_dict = {}
opt.store_pcaps = True
for bw_index, bw in enumerate(np.linspace(5,50,opt.how_many_values_per_parameter)):
for delay_index, delay in enumerate(np.linspace(10,100,opt.how_many_values_per_parameter)):
for buffer_index, buffer in enumerate(np.linspace(1,100,opt.how_many_values_per_parameter)):
fq = True
opt.rate = int(round(bw))
opt.delay = int(round(delay))
opt.buffer_size = int(round(buffer))
opt.qdisc = "fq" if fq else "pfifo"
opt.time = 30
with virtnet.Manager() as context:
client_output, timestamp = run(context, "accuracy")
contained_vegas = "Starting Vegas" in client_output
assert opt.store_pcaps
files = []
for file in os.listdir('pcaps'):
if fnmatch.fnmatch(file, f'sender_*{timestamp}.pcap'):
files.append(file)
assert len(files) == 1, len(files)
command = f"python3 ./plot_rtt_and_bandwidth.py {files[0]} no_plotting"
# print("command", command)
output = subprocess.check_output(command.split())
# print("parsing output", output)
output_lines = output.decode("utf-8").split("\n")[:2]
throughput = float(output_lines[0].split(" ")[-1])
rtt = float(output_lines[1].split(" ")[-1])
print("throughput", throughput, "rtt", rtt)
results_dict[(bw, delay, buffer)] = (throughput, rtt, contained_vegas)
all_throughputs, all_delays, contained_vegas = zip(*results_dict.values())
print("total len", len(all_throughputs))
print("mean throughput", statistics.mean(all_throughputs), "stdev throughput", statistics.stdev(all_throughputs))
print("mean rtt", statistics.mean(all_delays), "stdev rtt", statistics.stdev(all_delays))
print("detection accuracy", sum(contained_vegas)/len(contained_vegas))
elif opt.run_scenario == "competing_flow":
opt.competing_flow = True
opt.time = 20
opt.store_pcaps = True
opt.buffer_size = 100
opt.rate = 50
opt.delay = 10
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
print("Starting fq experiment")
opt.qdisc = "fq"
opt.two_iperfs = False
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_fq")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
opt.two_iperfs = True
print("Starting pfifo experiment")
opt.qdisc = "pfifo"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_pfifo")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
# os.environ["START_PCC_CLASSIC"] = "1"
opt.two_iperfs = True
# print("Starting fq experiment with PCC_CLASSIC")
print("Starting fq experiment with Cubic")
opt.qdisc = "fq"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_fq_pcc")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
opt.two_iperfs = False
os.environ["START_VEGAS"] = "1"
opt.qdisc = "pfifo"
print("Starting pfifo experiment with VEGAS")
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_pfifo_vegas")
elif opt.run_scenario == "just_one_flow":
opt.time = 20
opt.store_pcaps = True
opt.buffer_size = 100
opt.rate = 10
opt.delay = 10
os.environ["ONLY_ONE_FLOW"] = "1"
print("ours experiment")
opt.qdisc = "fq"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "just_one_flow_vegas")
print("cubic experiment")
opt.qdisc = "fq"
opt.only_iperf = True
opt.competing_flow = True
with virtnet.Manager() as context:
client_output, timestamp = run(context, "just_one_flow_cubic") | 37.366133 | 594 | 0.716762 |
24c7ab5e0f868646dcdd5ca3d5ca6fb13bb090c9 | 36 | py | Python | tests/__init__.py | WandyYing/mussel | 61711ec07078ee089ba8011a8ef688beaee10de7 | ["MIT"] | null | null | null | tests/__init__.py | WandyYing/mussel | 61711ec07078ee089ba8011a8ef688beaee10de7 | ["MIT"] | 1 | 2021-12-15T16:28:37.000Z | 2021-12-15T16:28:37.000Z | tests/__init__.py | WandyYing/mussel | 61711ec07078ee089ba8011a8ef688beaee10de7 | ["MIT"] | null | null | null |
"""Unit test package for mussel."""
| 18 | 35 | 0.666667 |
9caaa1de83c1bcbcaea7d3ac04b4bc9655c02ea5 | 2,467 | py | Python | frappe/utils/bench_helper.py | dalwadani/frappe | 8843030374b711d3c72ab840713fdd1a21561bb5 | ["MIT"] | null | null | null | frappe/utils/bench_helper.py | dalwadani/frappe | 8843030374b711d3c72ab840713fdd1a21561bb5 | ["MIT"] | null | null | null | frappe/utils/bench_helper.py | dalwadani/frappe | 8843030374b711d3c72ab840713fdd1a21561bb5 | ["MIT"] | 1 | 2019-10-01T07:32:09.000Z | 2019-10-01T07:32:09.000Z |
from __future__ import unicode_literals, print_function
import click
import frappe
import os
import json
import importlib
import frappe.utils
import traceback
click.disable_unicode_literals_warning = True
def main():
commands = get_app_groups()
commands.update({
'get-frappe-commands': get_frappe_commands,
'get-frappe-help': get_frappe_help
})
click.Group(commands=commands)(prog_name='bench')
def get_app_groups():
'''Get all app groups, put them in main group "frappe" since bench is
designed to only handle that'''
commands = dict()
for app in get_apps():
app_commands = get_app_commands(app)
if app_commands:
commands.update(app_commands)
ret = dict(frappe=click.group(name='frappe', commands=commands)(app_group))
return ret
def get_app_group(app):
app_commands = get_app_commands(app)
if app_commands:
return click.group(name=app, commands=app_commands)(app_group)
@click.option('--site')
@click.option('--profile', is_flag=True, default=False, help='Profile')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', is_flag=True, default=False, help='Force')
@click.pass_context
def app_group(ctx, site=False, force=False, verbose=False, profile=False):
ctx.obj = {
'sites': get_sites(site),
'force': force,
'verbose': verbose,
'profile': profile
}
if ctx.info_name == 'frappe':
ctx.info_name = ''
def get_sites(site_arg):
if site_arg and site_arg == 'all':
return frappe.utils.get_sites()
else:
if site_arg:
return [site_arg]
if os.path.exists('currentsite.txt'):
with open('currentsite.txt') as f:
return [f.read().strip()]
def get_app_commands(app):
try:
app_command_module = importlib.import_module(app + '.commands')
except ImportError as e:
if not 'No module named' in str(e):
traceback.print_exc()
return []
ret = {}
for command in getattr(app_command_module, 'commands', []):
ret[command.name] = command
return ret
@click.command('get-frappe-commands')
def get_frappe_commands():
commands = list(get_app_commands('frappe').keys())
for app in get_apps():
app_commands = get_app_commands(app)
if app_commands:
commands.extend(app_commands.keys())
print(json.dumps(commands))
@click.command('get-frappe-help')
def get_frappe_help():
print(click.Context(get_app_groups()['frappe']).get_help())
def get_apps():
return frappe.get_all_apps(with_internal_apps=False, sites_path='.')
if __name__ == "__main__":
main()
| 25.697917 | 76 | 0.731658 |
8a9dee6cf1c805215415723e0c7077600c0d8ffb | 14,879 | py | Python | rlbase/algos/sac/sac.py | sahandrez/rlbase | e5ef2a8f4ef0d142c4de41a7aea1bf9bb7708e7e | ["BSD-3-Clause"] | null | null | null | rlbase/algos/sac/sac.py | sahandrez/rlbase | e5ef2a8f4ef0d142c4de41a7aea1bf9bb7708e7e | ["BSD-3-Clause"] | 4 | 2020-03-13T21:14:41.000Z | 2020-03-18T01:52:53.000Z | rlbase/algos/sac/sac.py | sahandrez/rlbase | e5ef2a8f4ef0d142c4de41a7aea1bf9bb7708e7e | ["BSD-3-Clause"] | null | null | null | from copy import deepcopy
import itertools
import numpy as np
import torch
from torch.optim import Adam
import gym
import time
import rlbase.algos.sac.core as core
from rlbase.utils.logx import EpochLogger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for SAC agents.
"""
def __init__(self, obs_dim, act_dim, size):
self.obs_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.obs2_buf = np.zeros(core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(core.combined_shape(size, act_dim), dtype=np.float32)
self.rew_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done):
self.obs_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.act_buf[self.ptr] = act
self.rew_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
batch = dict(obs=self.obs_buf[idxs],
obs2=self.obs2_buf[idxs],
act=self.act_buf[idxs],
rew=self.rew_buf[idxs],
done=self.done_buf[idxs])
return {k: torch.as_tensor(v, dtype=torch.float32) for k,v in batch.items()}
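# Minimal usage sketch for ReplayBuffer (illustrative only; the dimensions and
# values below are arbitrary and not tied to any environment). `store` writes
# one transition at `ptr` and wraps around once `max_size` is reached, while
# `sample_batch` draws uniformly from whatever is currently held.
def _replay_buffer_example():
    buf = ReplayBuffer(obs_dim=3, act_dim=1, size=100)
    o = np.zeros(3, dtype=np.float32)
    for _ in range(10):
        a = np.zeros(1, dtype=np.float32)
        o2, r, d = np.ones(3, dtype=np.float32), 1.0, False
        buf.store(o, a, r, o2, d)  # overwrites the oldest entry once full
        o = o2
    batch = buf.sample_batch(batch_size=4)
    return batch['obs'].shape  # torch.Size([4, 3])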
def sac(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=dict(), seed=0,
steps_per_epoch=4000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=1e-3, alpha=0.2, batch_size=100, start_steps=10000,
update_after=1000, update_every=50, num_test_episodes=10, max_ep_len=1000,
logger_kwargs=dict(), save_freq=1):
"""
Soft Actor-Critic (SAC)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: The constructor method for a PyTorch Module with an ``act``
method, a ``pi`` module, a ``q1`` module, and a ``q2`` module.
The ``act`` method and ``pi`` module should accept batches of
observations as inputs, and ``q1`` and ``q2`` should accept a batch
of observations and a batch of actions as inputs. When called,
``act``, ``q1``, and ``q2`` should return:
=========== ================ ======================================
Call Output Shape Description
=========== ================ ======================================
``act`` (batch, act_dim) | Numpy array of actions for each
| observation.
``q1`` (batch,) | Tensor containing one current estimate
| of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
``q2`` (batch,) | Tensor containing the other current
| estimate of Q* for the provided observations
| and actions. (Critical: make sure to
| flatten this!)
=========== ================ ======================================
Calling ``pi`` should return:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``a`` (batch, act_dim) | Tensor containing actions from policy
| given observations.
``logp_pi`` (batch,) | Tensor containing log probabilities of
| actions in ``a``. Importantly: gradients
| should be able to flow back into ``a``.
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the ActorCritic object
you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
torch.manual_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Create actor-critic module and target networks
ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs)
ac_targ = deepcopy(ac)
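    # ac_targ trails ac via polyak averaging (see `update` below): with
    # polyak=0.995, a target weight of 1.0 and a main weight of 2.0 become
    # 0.995 * 1.0 + 0.005 * 2.0 = 1.005 after one step. These slowly-moving
    # targets feed the Bellman backup
    # r + gamma * (1 - d) * (min(Q1_targ, Q2_targ) - alpha * logp_a2).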
# Freeze target networks with respect to optimizers (only update via polyak averaging)
for p in ac_targ.parameters():
p.requires_grad = False
# List of parameters for both Q-networks (save this for convenience)
q_params = itertools.chain(ac.q1.parameters(), ac.q2.parameters())
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Count variables (protip: try to get a feel for how different size networks behave!)
var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.q1, ac.q2])
logger.log('\nNumber of parameters: \t pi: %d, \t q1: %d, \t q2: %d\n'%var_counts)
# Set up function for computing SAC Q-losses
def compute_loss_q(data):
o, a, r, o2, d = data['obs'], data['act'], data['rew'], data['obs2'], data['done']
q1 = ac.q1(o,a)
q2 = ac.q2(o,a)
# Bellman backup for Q functions
with torch.no_grad():
# Target actions come from *current* policy
a2, logp_a2 = ac.pi(o2)
# Target Q-values
q1_pi_targ = ac_targ.q1(o2, a2)
q2_pi_targ = ac_targ.q2(o2, a2)
q_pi_targ = torch.min(q1_pi_targ, q2_pi_targ)
backup = r + gamma * (1 - d) * (q_pi_targ - alpha * logp_a2)
# MSE loss against Bellman backup
loss_q1 = ((q1 - backup)**2).mean()
loss_q2 = ((q2 - backup)**2).mean()
loss_q = loss_q1 + loss_q2
# Useful info for logging
q_info = dict(Q1Vals=q1.detach().numpy(),
Q2Vals=q2.detach().numpy())
return loss_q, q_info
# Set up function for computing SAC pi loss
def compute_loss_pi(data):
o = data['obs']
pi, logp_pi = ac.pi(o)
q1_pi = ac.q1(o, pi)
q2_pi = ac.q2(o, pi)
q_pi = torch.min(q1_pi, q2_pi)
# Entropy-regularized policy loss
loss_pi = (alpha * logp_pi - q_pi).mean()
# Useful info for logging
pi_info = dict(LogPi=logp_pi.detach().numpy())
return loss_pi, pi_info
# Set up optimizers for policy and q-function
pi_optimizer = Adam(ac.pi.parameters(), lr=lr)
q_optimizer = Adam(q_params, lr=lr)
# Set up model saving
logger.setup_pytorch_saver(ac)
def update(data):
# First run one gradient descent step for Q1 and Q2
q_optimizer.zero_grad()
loss_q, q_info = compute_loss_q(data)
loss_q.backward()
q_optimizer.step()
# Record things
logger.store(LossQ=loss_q.item(), **q_info)
# Freeze Q-networks so you don't waste computational effort
# computing gradients for them during the policy learning step.
for p in q_params:
p.requires_grad = False
# Next run one gradient descent step for pi.
pi_optimizer.zero_grad()
loss_pi, pi_info = compute_loss_pi(data)
loss_pi.backward()
pi_optimizer.step()
        # Unfreeze Q-networks so you can optimize them at the next SAC step.
for p in q_params:
p.requires_grad = True
# Record things
logger.store(LossPi=loss_pi.item(), **pi_info)
# Finally, update target networks by polyak averaging.
with torch.no_grad():
for p, p_targ in zip(ac.parameters(), ac_targ.parameters()):
# NB: We use an in-place operations "mul_", "add_" to update target
# params, as opposed to "mul" and "add", which would make new tensors.
p_targ.data.mul_(polyak)
p_targ.data.add_((1 - polyak) * p.data)
def get_action(o, deterministic=False):
return ac.act(torch.as_tensor(o, dtype=torch.float32),
deterministic)
def test_agent():
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, 0, 0
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += r
ep_len += 1
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
# Prepare for interaction with environment
total_steps = steps_per_epoch * epochs
start_time = time.time()
o, ep_ret, ep_len = env.reset(), 0, 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy.
if t > start_steps:
a = get_action(o)
else:
a = env.action_space.sample()
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(o, a, r, o2, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if d or (ep_len == max_ep_len):
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Update handling
if t >= update_after and t % update_every == 0:
for j in range(update_every):
batch = replay_buffer.sample_batch(batch_size)
update(data=batch)
# End of epoch handling
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--hid', type=int, default=256)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='sac')
args = parser.parse_args()
from rlbase.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
torch.set_num_threads(torch.get_num_threads())
sac(lambda : gym.make(args.env), actor_critic=core.MLPActorCritic,
ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
gamma=args.gamma, seed=args.seed, epochs=args.epochs,
logger_kwargs=logger_kwargs)
| 40.105121 | 90 | 0.581289 |
377f477f7c047b0e06b95826128d5a9cc1c64393 | 122 | py | Python | tests/test_helpers/sample.py | linkdd/triotp | 7726438da36255c983d999490109f104655fb3fe | ["MIT"] | 4 | 2021-11-26T21:39:17.000Z | 2022-03-04T09:32:07.000Z | tests/test_helpers/sample.py | linkdd/triotp | 7726438da36255c983d999490109f104655fb3fe | ["MIT"] | 1 | 2021-11-30T20:28:10.000Z | 2021-12-01T01:03:28.000Z | tests/test_helpers/sample.py | linkdd/triotp | 7726438da36255c983d999490109f104655fb3fe | ["MIT"] | null | null | null | from triotp.helpers import current_module
__module__ = current_module()
def get_module():
return current_module()
| 13.555556 | 41 | 0.770492 |
e55b7c8a2a34a3cdcad41fc1ec80341c87473842 | 19,791 | py | Python | venv/Lib/site-packages/caffe2/perfkernels/hp_emblookup_codegen.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z | venv/Lib/site-packages/caffe2/perfkernels/hp_emblookup_codegen.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | null | null | null | venv/Lib/site-packages/caffe2/perfkernels/hp_emblookup_codegen.py | Westlanderz/AI-Plat1 | 1187c22819e5135e8e8189c99b86a93a0d66b8d8 | ["MIT"] | null | null | null |
import argparse
import sys
sizeof = {"float": 4, "at::Half": 2, "uint8_t": 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
def compute(regid, InType, use_weights, isa, prefetch):
code = []
if InType == "float":
code.append(
" vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
% (regid, regid, regid)
)
elif InType == "at::Half":
code.append(
" vop%d = _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtph_ps(\n"
" _mm_loadu_si128(reinterpret_cast<const __m128i*>(ip + (%d)))),\n" # noqa
" vop%d);" % (regid, regid, regid)
)
elif InType == "uint8_t":
code.append(
" vop%d = _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(\n"
" _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ip + (%d))))),\n" # noqa
" _mm256_add_ps(vop%d, vbio));" % (regid, regid, regid)
)
else:
assert False
if prefetch:
code.append(
" _mm_prefetch(\n"
" reinterpret_cast<const char*>(&ip_next_T0[%d]), _MM_HINT_T0);"
% (regid)
)
else:
code.append(
" // skip unnecessary prefetch of (&ip_next_T0[%d])" % (regid)
)
return code
code = []
code.append(" // unrolling " + str(uf) + " times")
if use_offsets:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
else:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
code.append(" " + OutType + "* op = &out[rangeIndex * block_size];")
for i in range(0, uf):
j = 8 * i
code.append(" __m256 vop" + str(j) + " = _mm256_setzero_ps();")
# inner loop
if use_offsets:
code.append(
" if (dataInd != offsets[rangeIndex] - offsets[0]) {\n"
+ " return false;\n"
+ " }"
)
code.append("""\
int64_t end_offset = offsets[rangeIndex + 1];
int64_t length = end_offset - offsets[rangeIndex];""")
code.append(
" for ("
+ "int64_t"
+ " start = dataInd; dataInd < end_offset - offsets[0];\n ++dataInd) {" # noqa
)
else:
code.append(
" if (dataInd + lengths[rangeIndex] > index_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" for ("
+ IndexType
+ " start = dataInd; dataInd < start + lengths[rangeIndex];\n ++dataInd) {" # noqa
)
code.append(" const " + IndexType + " idx = indices[dataInd];")
code.append(
" if (idx < 0 || idx >= data_size) {\n"
+ " return false;\n"
+ " }"
)
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
if fused:
code.append(
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
code.append(" wgt = wgt * scale_bias[2 * idx];")
code.append(" __m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(" " + OutType + " wgt = 1.f;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
code.append(" __m256 vwgt = _mm256_set1_ps(wgt);")
code.append(" const {}* ip = &input[idx * fused_block_size];".format(InType))
code.append(
" const {} next_T0 = (dataInd < index_size - prefdist_T0)\n"
" ? (dataInd + prefdist_T0)\n : dataInd;".format(
IndexType
)
)
code.append(" const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
" if (idx_pref_T0 < 0 || idx_pref_T0 >= data_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" const {}* ip_next_T0 = "
"&input[idx_pref_T0 * fused_block_size];".format(InType)
)
for i in range(0, uf):
j = 8 * i
cachelinesize = 64
byteoffset = sizeof[InType] * j
prefetch = (byteoffset % cachelinesize) == 0
code.extend(compute(j, InType, use_weights, isa, prefetch))
code.append(" }")
if use_offsets:
code.append(" if (!normalize_by_lengths || length == 0) {")
else:
code.append(" if (!normalize_by_lengths || lengths[rangeIndex] == 0) {")
for i in range(0, uf):
j = 8 * i
code.append(" _mm256_storeu_ps(&op[" + str(j) + "], vop" + str(j) + ");")
code.append(" } else {")
# inv of length
if use_offsets:
code.append(" __m256 vlen_inv = _mm256_set1_ps(1.0f / length);")
else:
code.append(" __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);")
for i in range(0, uf):
j = 8 * i
code.append(
" _mm256_storeu_ps(&op["
+ str(j)
+ "], _mm256_mul_ps("
+ "vop"
+ str(j)
+ ", vlen_inv));"
)
code.append(" }")
code.append(" }")
return code
def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
def compute(InType, use_weights, isa):
code = []
if InType == "float":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt, _mm256_loadu_ps(&ip[j]), _mm256_loadu_ps(&op[j])));" # noqa
)
elif InType == "at::Half":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtph_ps(_mm_loadu_si128(\n"
" reinterpret_cast<const __m128i*>(&ip[j]))),\n"
" _mm256_loadu_ps(&op[j])));"
)
elif InType == "uint8_t":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadl_epi64(\n" # noqa
" reinterpret_cast<const __m128i*>(&ip[j])))),\n"
" _mm256_add_ps(_mm256_loadu_ps(&op[j]), vbio)));"
)
else:
assert False
code.append(
" _mm_prefetch(\n"
" reinterpret_cast<const char*>(&ip_next_T0[j]), _MM_HINT_T0);"
)
return code
code = []
if InType == "at::Half":
code.append(" alignas(64) at::Half vtmp1[8] = {0};")
if use_offsets:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
else:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
code.append(" " + OutType + "* op = &out[rangeIndex * block_size];")
# initialize to 0
code.append(" int64_t j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.append(" _mm256_storeu_ps(op + j, _mm256_setzero_ps());")
code.append(" }")
code.append(" for (; j < block_size; j++) {")
code.append(" op[j] = 0.0f;")
code.append(" }")
# inner loop
if use_offsets:
code.append(
" if (dataInd != offsets[rangeIndex] - offsets[0]) {\n"
+ " return false;\n"
+ " }"
)
code.append("""\
int64_t end_offset = offsets[rangeIndex + 1];
int64_t length = end_offset - offsets[rangeIndex];""")
code.append(
" for ("
+ "int64_t"
+ " start = dataInd; dataInd < end_offset - offsets[0];\n ++dataInd) {" # noqa
)
else:
code.append(
" if (dataInd + lengths[rangeIndex] > index_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" for ("
+ IndexType
+ " start = dataInd; dataInd < start + lengths[rangeIndex];\n ++dataInd) {" # noqa
)
code.append(" const " + IndexType + " idx = indices[dataInd];")
code.append(
" if (idx < 0 || idx >= data_size) {\n"
+ " return false;\n"
+ " }"
)
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
if fused:
code.append(
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
code.append(" wgt = wgt * scale_bias[2 * idx];")
code.append(" __m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(" " + OutType + " wgt = 1.f;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
code.append(" __m256 vwgt = _mm256_set1_ps(wgt);")
code.append(" const {}* ip = &input[idx * fused_block_size];".format(InType))
code.append(
" const {} next_T0 = (dataInd < index_size - prefdist_T0)\n"
" ? (dataInd + prefdist_T0)\n : dataInd;".format(
IndexType
)
)
code.append(" const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
" if (idx_pref_T0 < 0 || idx_pref_T0 >= data_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" const {}* ip_next_T0 = "
"&input[idx_pref_T0 * fused_block_size];".format(InType)
)
# compute and store main loop
code.append(" j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.extend(compute(InType, use_weights, isa))
code.append(" }")
# leftover
code.append(" for (; j < block_size; j++) {")
if InType == "float":
code.append(" op[j] = std::fma(wgt, ip[j], op[j]);")
elif InType == "at::Half":
code.append(" vtmp1[0] = ip[j];")
code.append(
" __m256 vtmp2 =\n"
" _mm256_cvtph_ps(*(reinterpret_cast<const __m128i*>(vtmp1)));"
)
code.append(" op[j] = std::fma(wgt, ((float*)(&vtmp2))[0], op[j]);")
elif InType == "uint8_t":
code.append(" op[j] = std::fma(wgt, (float)ip[j], bio + op[j]);")
else:
assert False
code.append(" }")
code.append(" }")
if use_offsets:
code.append(" if (normalize_by_lengths && length) {")
code.append(" float len_inv = 1.0f / length;")
else:
code.append(" if (normalize_by_lengths && lengths[rangeIndex]) {")
code.append(" float len_inv = 1.0f / lengths[rangeIndex];")
code.append(" __m256 vlen_inv = _mm256_set1_ps(len_inv);")
code.append(" j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.append(
" _mm256_storeu_ps(\n"
" &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv));"
)
code.append(" }")
code.append(" for (; j < block_size; j++) {")
code.append(" op[j] = len_inv * op[j];")
code.append(" }")
code.append(" }")
code.append(" }")
return code
# start main code
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filename", help="file name")
parser.add_argument("--fused", action="store_true")
parser.add_argument("--use-offsets", action="store_true")
opts = parser.parse_args()
if opts.filename:
filename = opts.filename
elif opts.fused:
if opts.use_offsets:
filename = "embedding_lookup_fused_8bit_rowwise_idx_avx2.cc"
else:
filename = "embedding_lookup_fused_8bit_rowwise_avx2.cc"
else:
if opts.use_offsets:
filename = "embedding_lookup_idx_avx2.cc"
else:
filename = "embedding_lookup_avx2.cc"
options = [
["int32_t", "int", "float", "float", "float", "float"],
["int64_t", "int64_t", "float", "float", "float", "float"],
["int32_t", "int", "half", "at::Half", "float", "float"],
["int64_t", "int64_t", "half", "at::Half", "float", "float"],
["int32_t", "int", "uint8_t", "uint8_t", "float", "float"],
["int64_t", "int64_t", "uint8_t", "uint8_t", "float", "float"],
]
code = []
# includes
code.append("//// --------------------------")
code.append("//// ATTENTION:")
code.append("//// THIS CODE IS AUTOGENERATED")
code.append("//// BY {}".format(sys.argv[0]))
code.append("//// DO NOT MODIFY!!!")
code.append("//// --------------------------\n")
code.append("#include <c10/util/Half.h>")
code.append("#include <immintrin.h>")
code.append("namespace caffe2 {\n")
for o in options:
[IndexTypeName, IndexType, InTypeName, InType, OutTypeName, OutType] = o
prefix = "Fused8BitRowwise" if opts.fused else ""
code.append("template <bool IS_WEIGHT_POSITIONAL>")
if opts.use_offsets:
fn_base = "{}EmbeddingLookupIdx_{}_{}_{}".format(
prefix, IndexTypeName, InTypeName, OutTypeName
)
else:
fn_base = "{}EmbeddingLookup_{}_{}_{}".format(
prefix, IndexTypeName, InTypeName, OutTypeName
)
suffix = "__avx2_fma"
fn = "static bool " + fn_base + suffix
code.append(fn + "(")
args = []
args.append(" const int64_t block_size,")
args.append(" const int64_t output_size,")
args.append(" const int64_t index_size,")
args.append(" const int64_t data_size,")
args.append(" const " + InType + "* input,")
args.append(" const " + IndexType + "* indices,")
if opts.use_offsets:
args.append(" const " + IndexType + "* offsets,")
else:
args.append(" const int* lengths,")
args.append(" const float* weights,")
if not opts.fused:
args.append(" const float* scale_bias,")
args.append(" bool normalize_by_lengths,")
args.append(" " + OutType + "* out) {")
code += args
code.append(" const " + IndexType + " prefdist_T0 = 16;")
# block_size is the number of elements and fused_block_size is the size of
# an entire row, including scale and bias.
offset = (8 // sizeof[InType]) if opts.fused else 0
code.append(
" const {} fused_block_size = block_size + {};".format(IndexType, offset)
)
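    # Worked example of the offset above: the fused 8-bit rowwise layout keeps
    # a float scale and a float bias (8 bytes) at the end of each row, so the
    # row stride grows by 8 // sizeof(InType) elements -- 8 for uint8_t, 4 for
    # at::Half, 2 for float -- and stays at 0 when --fused is not given.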
if opts.use_offsets:
code.append(" int64_t dataInd = 0;")
else:
code.append(" " + IndexType + " dataInd = 0;")
# code.append("printf(\"calling " + fn + "\\n\");");
code.append(" if (block_size == 128) {")
code += unroll(16, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 64) {")
code += unroll(8, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 32) {")
code += unroll(4, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 16) {")
code += unroll(2, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else {")
code.append(" // generic code")
code += generic(IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" }")
code.append(" return dataInd == index_size;")
code.append("}")
for is_weight_positional in ["false", "true"]:
code.append("bool " + fn_base + "_" + is_weight_positional + suffix + "(")
code += args
# Resolve the Lint warnings: Limit of 80 characters in one line.
extra_space = "\n "
ret_string = " return " + fn_base + suffix + "<" + is_weight_positional + ">("
if len(ret_string) <= 80:
code.append(ret_string)
else:
code.append(" return " + fn_base + suffix + "<" + extra_space + is_weight_positional + ">(")
code.append(" block_size,")
code.append(" output_size,")
code.append(" index_size,")
code.append(" data_size,")
code.append(" input,")
code.append(" indices,")
if opts.use_offsets:
code.append(" offsets,")
else:
code.append(" lengths,")
code.append(" weights,")
if not opts.fused:
code.append(" scale_bias,")
code.append(" normalize_by_lengths,")
code.append(" out);")
code.append("}")
code.append("")
code.append("} // namespace caffe2")
with open(filename, "w") as fout:
for c in code:
# print(c, file = fout)
fout.write(c + "\n")
print("Created " + filename)
| 37.271186 | 108 | 0.468647 |
4aa6161512337b9f60de396bc8e08d2f621fbf10 | 15,184 | py | Python | src/detext/run_detext.py | qingquansong/detext | 66df145e653ce05af094d3379e27b60d0d3c81b4 | ["BSD-2-Clause"] | 1 | 2020-04-15T19:26:05.000Z | 2020-04-15T19:26:05.000Z | src/detext/run_detext.py | qingquansong/detext | 66df145e653ce05af094d3379e27b60d0d3c81b4 | ["BSD-2-Clause"] | null | null | null | src/detext/run_detext.py | qingquansong/detext | 66df145e653ce05af094d3379e27b60d0d3c81b4 | ["BSD-2-Clause"] | null | null | null | """
Overall pipeline to train the model. It parses arguments, and trains a CLSM model.
"""
import sys
import argparse
import logging
import os
import time
import tensorflow as tf
import tensorflow_ranking as tfr
from detext.train import train
from detext.utils import misc_utils, logger, executor_utils
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument("--ftr_ext", choices=['cnn', 'bert', 'lstm_lm', 'lstm'], help="NLP feature extraction module.")
parser.add_argument("--num_units", type=int, default=128, help="word embedding size.")
parser.add_argument("--num_units_for_id_ftr", type=int, default=128, help="id feature embedding size.")
parser.add_argument("--num_hidden", type=str, default='0', help="hidden size.")
parser.add_argument("--num_wide", type=int, default=0, help="number of wide features per doc.")
parser.add_argument("--num_wide_sp", type=int, default=None, help="number of sparse wide features per doc")
parser.add_argument("--use_deep", type=str2bool, default=True, help="Whether to use deep features.")
parser.add_argument("--elem_rescale", type=str2bool, default=True,
help="Whether to perform elementwise rescaling.")
# Ranking specific
parser.add_argument("--ltr_loss_fn", type=str, default='pairwise', help="learning-to-rank method.")
parser.add_argument("--emb_sim_func", default='inner',
help="Approach to computing query/doc similarity scores: "
"inner/hadamard/concat or any combination of them separated by comma.")
# Classification specific
parser.add_argument("--num_classes", type=int, default=1,
help="Number of classes for multi-class classification tasks.")
# CNN related
parser.add_argument("--filter_window_sizes", type=str, default='3', help="CNN filter window sizes.")
parser.add_argument("--num_filters", type=int, default=100, help="number of CNN filters.")
parser.add_argument("--explicit_empty", type=str2bool, default=False,
help="Explicitly modeling empty string in cnn")
# BERT related
parser.add_argument("--lr_bert", type=float, default=None, help="Learning rate factor for bert components")
parser.add_argument("--bert_config_file", type=str, default=None, help="bert config.")
parser.add_argument("--bert_checkpoint", type=str, default=None, help="pretrained bert model checkpoint.")
# LSTM related
parser.add_argument("--unit_type", type=str, default="lstm",
help="RNN cell unit type. Support lstm/gru/layer_norm_lstm")
parser.add_argument("--num_layers", type=int, default=1, help="RNN layers")
parser.add_argument("--num_residual_layers", type=int, default=0,
help="Number of residual layers from top to bottom. For example, if `num_layers=4` and "
"`num_residual_layers=2`, the last 2 RNN cells in the returned list will be wrapped "
"with `ResidualWrapper`.")
parser.add_argument("--forget_bias", type=float, default=1., help="Forget bias of RNN cell")
parser.add_argument("--rnn_dropout", type=float, default=0., help="Dropout of RNN cell")
parser.add_argument("--bidirectional", type=str2bool, default=False, help="Whether to use bidirectional RNN")
parser.add_argument("--normalized_lm", type=str2bool, default=False,
help="Whether to use normalized lm. This option only works for unit_type=lstm_lm")
# Optimizer
parser.add_argument("--optimizer", type=str, choices=["sgd", "adam", "bert_adam"], default="sgd",
help="Type of optimizer to use. bert_adam is similar to the optimizer implementation in bert.")
parser.add_argument("--max_gradient_norm", type=float, default=1.0, help="Clip gradients to this norm.")
parser.add_argument("--learning_rate", type=float, default=1.0, help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--num_train_steps", type=int, default=1, help="Num steps to train.")
parser.add_argument("--num_epochs", type=int, default=None, help="Num of epochs to train, will overwrite train_steps if set")
parser.add_argument("--num_warmup_steps", type=int, default=0, help="Num steps for warmup.")
parser.add_argument("--train_batch_size", type=int, default=32, help="Training data batch size.")
parser.add_argument("--test_batch_size", type=int, default=32, help="Test data batch size.")
parser.add_argument("--l1", type=float, default=None, help="Scale of L1 regularization")
parser.add_argument("--l2", type=float, default=None, help="Scale of L2 regularization")
# Data
parser.add_argument("--train_file", type=str, default=None, help="Train file.")
parser.add_argument("--dev_file", type=str, default=None, help="Dev file.")
parser.add_argument("--test_file", type=str, default=None, help="Test file.")
parser.add_argument("--out_dir", type=str, default=None, help="Store log/model files.")
parser.add_argument("--std_file", type=str, default=None, help="feature standardization file")
parser.add_argument("--max_len", type=int, default=32, help="max sent length.")
parser.add_argument("--min_len", type=int, default=3, help="min sent length.")
# Vocab and word embedding
parser.add_argument("--vocab_file", type=str, default=None, help="Vocab file")
parser.add_argument("--we_file", type=str, default=None, help="Pretrained word embedding file")
parser.add_argument("--we_trainable", type=str2bool, default=True, help="Whether to train word embedding")
parser.add_argument("--PAD", type=str, default="[PAD]", help="Token for padding")
parser.add_argument("--SEP", type=str, default="[SEP]", help="Token for sentence separation")
parser.add_argument("--CLS", type=str, default="[CLS]", help="Token for start of sentence")
parser.add_argument("--UNK", type=str, default="[UNK]", help="Token for unknown word")
parser.add_argument("--MASK", type=str, default="[MASK]", help="Token for masked word")
# Vocab and word embedding for id features
parser.add_argument("--vocab_file_for_id_ftr", type=str, default=None, help="Vocab file for id features")
parser.add_argument("--we_file_for_id_ftr", type=str, default=None,
help="Pretrained word embedding file for id features")
parser.add_argument("--we_trainable_for_id_ftr", type=str2bool, default=True,
help="Whether to train word embedding for id features")
parser.add_argument("--PAD_FOR_ID_FTR", type=str, default="[PAD]", help="Padding token for id features")
parser.add_argument("--UNK_FOR_ID_FTR", type=str, default="[UNK]", help="Unknown word token for id features")
# Misc
parser.add_argument("--random_seed", type=int, default=1234, help="Random seed (>0, set a specific seed).")
parser.add_argument("--steps_per_stats", type=int, default=100, help="training steps to print statistics.")
parser.add_argument("--steps_per_eval", type=int, default=1000, help="training steps to evaluate datasets.")
parser.add_argument("--keep_checkpoint_max", type=int, default=5,
help="The maximum number of recent checkpoint files to keep. If 0, all checkpoint "
"files are kept. Defaults to 5")
parser.add_argument("--feature_names", type=str, default=None, help="the feature names.")
parser.add_argument("--lambda_metric", type=str, default=None, help="only support ndcg.")
parser.add_argument("--init_weight", type=float, default=0.1, help="weight initialization value.")
parser.add_argument("--pmetric", type=str, default=None, help="Primary metric.")
parser.add_argument("--all_metrics", type=str, default=None, help="All metrics.")
parser.add_argument("--score_rescale", type=str, default=None, help="The mean and std of previous model.")
parser.add_argument("--tokenization", type=str, default='punct', choices=['plain', 'punct'],
help="The tokenzation performed for data preprocessing. "
"Currently support: punct/plain(no split). "
"Note that this should be set correctly to ensure consistency for savedmodel.")
parser.add_argument("--resume_training", type=str2bool, default=False,
help="Whether to resume training from checkpoint in out_dir.")
parser.add_argument("--metadata_path", type=str, default=None,
help="The metadata_path for converted avro2tf avro data.")
# tf-ranking related
parser.add_argument("--use_tfr_loss", type=str2bool, default=False, help="whether to use tf-ranking loss.")
parser.add_argument('--tfr_loss_fn',
choices=[
tfr.losses.RankingLossKey.SOFTMAX_LOSS,
tfr.losses.RankingLossKey.PAIRWISE_LOGISTIC_LOSS],
default=tfr.losses.RankingLossKey.SOFTMAX_LOSS,
help="softmax_loss")
parser.add_argument('--tfr_lambda_weights', type=str, default=None)
parser.add_argument('--use_horovod', type=str2bool, default=False, help="whether to use horovod for sync distributed training")
def str2bool(v):
if v.lower() in ('true', '1'):
return True
elif v.lower() in ('false', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
ftr_ext=flags.ftr_ext,
filter_window_sizes=flags.filter_window_sizes,
num_units=flags.num_units,
num_units_for_id_ftr=flags.num_units_for_id_ftr,
num_filters=flags.num_filters,
num_hidden=flags.num_hidden,
num_wide=flags.num_wide,
ltr_loss_fn=flags.ltr_loss_fn,
use_deep=flags.use_deep,
elem_rescale=flags.elem_rescale,
emb_sim_func=flags.emb_sim_func,
num_classes=flags.num_classes,
optimizer=flags.optimizer,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
lr_bert=flags.lr_bert,
num_train_steps=flags.num_train_steps,
num_epochs=flags.num_epochs,
num_warmup_steps=flags.num_warmup_steps,
train_batch_size=flags.train_batch_size,
test_batch_size=flags.test_batch_size,
train_file=flags.train_file,
dev_file=flags.dev_file,
test_file=flags.test_file,
out_dir=flags.out_dir,
vocab_file=flags.vocab_file,
we_file=flags.we_file,
we_trainable=flags.we_trainable,
random_seed=flags.random_seed,
steps_per_stats=flags.steps_per_stats,
steps_per_eval=flags.steps_per_eval,
keep_checkpoint_max=flags.keep_checkpoint_max,
max_len=flags.max_len,
min_len=flags.min_len,
feature_names=flags.feature_names,
lambda_metric=flags.lambda_metric,
bert_config_file=flags.bert_config_file,
bert_checkpoint=flags.bert_checkpoint,
init_weight=flags.init_weight,
pmetric=flags.pmetric,
std_file=flags.std_file,
num_wide_sp=flags.num_wide_sp,
all_metrics=flags.all_metrics,
score_rescale=flags.score_rescale,
explicit_empty=flags.explicit_empty,
tokenization=flags.tokenization,
unit_type=flags.unit_type,
num_layers=flags.num_layers,
num_residual_layers=flags.num_residual_layers,
forget_bias=flags.forget_bias,
rnn_dropout=flags.rnn_dropout,
bidirectional=flags.bidirectional,
PAD=flags.PAD,
SEP=flags.SEP,
CLS=flags.CLS,
UNK=flags.UNK,
MASK=flags.MASK,
resume_training=flags.resume_training,
metadata_path=flags.metadata_path,
tfr_loss_fn=flags.tfr_loss_fn,
tfr_lambda_weights=flags.tfr_lambda_weights,
use_tfr_loss=flags.use_tfr_loss,
use_horovod=flags.use_horovod,
normalized_lm=flags.normalized_lm,
# Vocab and word embedding for id features
PAD_FOR_ID_FTR=flags.PAD_FOR_ID_FTR,
UNK_FOR_ID_FTR=flags.UNK_FOR_ID_FTR,
vocab_file_for_id_ftr=flags.vocab_file_for_id_ftr,
we_file_for_id_ftr=flags.we_file_for_id_ftr,
we_trainable_for_id_ftr=flags.we_trainable_for_id_ftr,
l1=flags.l1,
l2=flags.l2,
)
def get_hparams(argv):
"""
Get hyper-parameters.
"""
parser = argparse.ArgumentParser()
add_arguments(parser)
hparams, unknown_params = parser.parse_known_args(argv)
hparams = create_hparams(hparams)
# Print all hyper-parameters
for k, v in sorted(vars(hparams).items()):
print('--' + k + '=' + str(v))
return hparams
def main(argv):
"""
This is the main method for training the model.
:param argv: training parameters
:return:
"""
# Get executor task type from TF_CONFIG
task_type = executor_utils.get_executor_task_type()
# Get hyper-parameters.
hparams = get_hparams(argv)
tf.logging.set_verbosity(tf.logging.INFO)
# if epoch is set, overwrite training steps
if hparams.num_epochs is not None:
hparams.num_train_steps = misc_utils.estimate_train_steps(
hparams.train_file,
hparams.num_epochs,
hparams.train_batch_size,
hparams.metadata_path is None)
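        # Rough arithmetic (an assumption about estimate_train_steps, whose body
        # is not shown here): num_train_steps ~ num_epochs * num_examples / train_batch_size,
        # with the last flag indicating plain TFRecord input (no avro2tf metadata).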
# Create directory and launch tensorboard
if task_type == executor_utils.CHIEF or task_type == executor_utils.LOCAL_MODE:
# If not resume training from checkpoints, delete output directory.
if not hparams.resume_training:
logging.info("Removing previous output directory...")
if tf.gfile.Exists(hparams.out_dir):
tf.gfile.DeleteRecursively(hparams.out_dir)
# If output directory deleted or does not exist, create the directory.
if not tf.gfile.Exists(hparams.out_dir):
logging.info('Creating dirs recursively at: {0}'.format(hparams.out_dir))
tf.gfile.MakeDirs(hparams.out_dir)
misc_utils.save_hparams(hparams.out_dir, hparams)
# set up logger
sys.stdout = logger.Logger(os.path.join(hparams.out_dir, 'logging.txt'))
else:
# TODO: move removal/creation to a hadoopShellJob st. it does not reside in distributed training code.
logging.info("Waiting for chief to remove/create directories.")
# Wait for dir created form chief
time.sleep(10)
if task_type == executor_utils.EVALUATOR:
# set up logger for evaluator
sys.stdout = logger.Logger(os.path.join(hparams.out_dir, 'eval_log.txt'))
hparams = misc_utils.extend_hparams(hparams)
logging.info("***********DeText Training***********")
# Train and evaluate DeText model
train.train(hparams)
if __name__ == '__main__':
tf.compat.v1.app.run(main=main)
| 48.980645 | 131 | 0.679004 |
e271479dabf329eb1792b4d958c4b53c06005d37 | 211 | py | Python | practice/practice73.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | ["MIT"] | null | null | null | practice/practice73.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | ["MIT"] | null | null | null | practice/practice73.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | ["MIT"] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == '__main__':
ptr = []
for i in range(5):
        num = int(input('input a number:'))
ptr.append(num)
ptr.reverse()
    print(ptr)
| 17.583333 | 47 | 0.530806 |
3db39d4949acf4fac56f7a77d4e1abb56f61e4d4 | 29,036 | py | Python | rest_api/tests/basic_table_tests.py | InspectorIncognito/gtfs-editor | 4e3245f44ec44aeb2d28aa25786dc95a3193fb81 | ["MIT"] | 2 | 2021-10-01T16:11:20.000Z | 2022-01-15T10:55:40.000Z | rest_api/tests/basic_table_tests.py | InspectorIncognito/gtfs-editor | 4e3245f44ec44aeb2d28aa25786dc95a3193fb81 | ["MIT"] | 3 | 2021-06-10T19:17:55.000Z | 2022-03-05T08:37:46.000Z | rest_api/tests/basic_table_tests.py | InspectorIncognito/gtfs-editor | 4e3245f44ec44aeb2d28aa25786dc95a3193fb81 | ["MIT"] | 1 | 2022-03-05T08:37:53.000Z | 2022-03-05T08:37:53.000Z | import datetime
from rest_framework import status
from rest_api.models import Calendar, FeedInfo, Agency, Stop, Route, Trip, StopTime, Level, Shape, ShapePoint, \
CalendarDate, Pathway, Transfer, FareAttribute, Frequency
from rest_api.serializers import CalendarSerializer, LevelSerializer, StopSerializer, \
FeedInfoSerializer, AgencySerializer, RouteSerializer, TripSerializer, StopTimeSerializer, DetailedShapeSerializer, \
CalendarDateSerializer, PathwaySerializer, TransferSerializer, FrequencySerializer, FareAttributeSerializer, \
ShapePointSerializer
from rest_api.tests.test_helpers import BaseTableTest, BasicTestSuiteMixin
# Parametrized test suite. Implementing classes require a bunch of parameters in order
# to run the tests. The tests focus on checking the correct behavior of basic REST
# requests and their failure on invalid data.
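# Concretely, a new table test follows this shape (hypothetical model, shown
# only as a guide to the classes below):
#
#     class SomethingTableTest(BaseTableTest, BasicTestSuiteMixin):
#         table_name = "project-somethings"
#
#         class Meta:
#             model = Something
#             serializer = SomethingSerializer
#             initial_size = 1
#             invalid_id = 123456789
#
#             def get_id(self, project, data):
#                 return self.model.objects.filter(project=project, **data)[0].id
#
#             retrieve_data = {...}
#             create_data = {...}
#             delete_data = {...}
#             put_data = {...}
#             patch_data = {...}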
class CalendarTableTest(BaseTableTest,
BasicTestSuiteMixin):
table_name = "project-calendars"
class Meta:
model = Calendar
serializer = CalendarSerializer
initial_size = 2
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
service_id=data['service_id'])[0].id
# retrieve params
retrieve_data = {
'service_id': 'mon-fri'
}
# create params
create_data = {
'service_id': 'I created my own',
'monday': False,
'tuesday': False,
'wednesday': False,
'thursday': False,
'friday': False,
'saturday': False,
'sunday': False,
'start_date': "2020-01-01",
'end_date': "2020-12-31"
}
# delete params
delete_data = {
'service_id': 'mon-fri'
}
# put params
put_data = {
'service_id': 'mon-fri',
'monday': False,
'tuesday': False,
'wednesday': False,
'thursday': False,
'friday': False,
'saturday': True,
"sunday": True,
'start_date': "2020-01-01",
'end_date': "2020-12-31"
}
# patch params
patch_data = {
'service_id': 'mon-fri',
'saturday': True,
"sunday": True,
'start_date': '2020-01-02'
}
class StopTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-stops"
class Meta:
model = Stop
serializer = StopSerializer
initial_size = 42
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
stop_id=data['stop_id'])[0].id
# retrieve params
retrieve_data = {
'stop_id': 'stop_1'
}
# create params
create_data = {
'stop_id': 'stop-created',
'stop_code': 'PD-created',
'stop_name': 'Stop That Has Been Created',
'stop_lat': 100,
'stop_lon': -200,
'stop_url': 'http://www.fake-stop.cl'
}
# delete params
delete_data = {
'stop_id': 'stop_delete'
}
# put params
put_data = {
'stop_id': 'stop_1',
'stop_code': 'PD-bananas',
'stop_name': 'Stop -1',
'stop_lat': -1,
'stop_lon': -2,
'stop_url': 'http://www.stop-1.cl'
}
# patch params
patch_data = {
'stop_id': 'stop_1',
'stop_url': 'http://www.stop-1-patched.cl'
}
class FeedInfoTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-feedinfo"
class Meta:
model = FeedInfo
serializer = FeedInfoSerializer
initial_size = 1
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
feed_publisher_name=data['feed_publisher_name'])[0].id
# retrieve params
retrieve_data = {
'feed_publisher_name': 'Test Agency'
}
# delete params
delete_data = {
'feed_publisher_name': 'Test Agency'
}
# patch params
patch_data = {
'feed_publisher_name': 'Test Agency',
'feed_lang': 'ES',
'feed_version': '1.2.3'
}
# This should fail because each project can only have one feed info
def test_create(self):
data = {
'feed_publisher_name': 'Test Agency 2',
'feed_publisher_url': 'www.testagency.com',
'feed_lang': 'ES',
'feed_start_date': "2020-01-01",
'feed_end_date': "2020-12-31",
'feed_version': '1.2.3',
'feed_id': 'Test Feed 1'
}
with self.assertNumQueries(0):
json_response = self.create(self.project.project_id, self.client, data, status.HTTP_400_BAD_REQUEST)
# This should fail because PUT is not supported for one-to-one
def test_put(self):
data = {
'feed_publisher_name': 'Test Agency',
'feed_publisher_url': 'www.testagency.com',
'feed_lang': 'ES',
'feed_start_date': "2020-01-01",
'feed_end_date': "2020-12-31",
'feed_version': '1.2.3',
'feed_id': 'Test Feed 1'
}
with self.assertNumQueries(2):
id = self.Meta().get_id(self.project, data)
json_response = self.put(self.project.project_id, id, self.client, data, status.HTTP_400_BAD_REQUEST)
class AgencyTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-agencies"
class Meta:
model = Agency
serializer = AgencySerializer
initial_size = 3
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
agency_id=data['agency_id'])[0].id
# retrieve params
retrieve_data = {
'agency_id': 'test_agency'
}
# create params
create_data = {
'agency_id': "test_agency_2",
'agency_name': "Test Agency 2",
'agency_url': "http://www.testagency2.com",
'agency_timezone': "America/Santiago"
}
# delete params
delete_data = {
'agency_id': 'test_agency'
}
# put params
put_data = {
'agency_id': "test_agency",
'agency_name': "Test Agency 2",
'agency_url': "http://www.testagency2.com",
'agency_timezone': "America/Santiago"
}
# patch params
patch_data = {
'agency_id': "test_agency",
'agency_url': "http://www.testagency3.com"
}
class RouteTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-routes"
class Meta:
model = Route
serializer = RouteSerializer
initial_size = 6
invalid_id = 123456789
ignore_fields = ['agency__agency_id']
def get_id(self, project, data):
return self.model.objects.filter(agency__project=project,
agency__agency_id=data['agency__agency_id'],
route_id=data['route_id'])[0].id
# retrieve params
retrieve_data = {
'agency__agency_id': 'agency_0',
'route_id': 'test_route'
}
# create params
create_data = {
'agency__agency_id': 'test_agency',
'route_id': "test_route_2",
'route_short_name': "Test Route 2",
'route_long_name': "Test Route 2 - The Routening",
'route_desc': "This route was made for testing create endpoint",
'route_type': 1,
'route_url': "http://www.testroute2.com",
'route_color': "FF00FF",
'route_text_color': "00FF00",
}
# delete params
delete_data = {
'agency__agency_id': 'agency_0',
'route_id': 'test_route'
}
# put params
put_data = {
'agency__agency_id': 'agency_0',
'route_id': "test_route",
'route_short_name': "Test Route 2",
'route_long_name': "Test Route 2 - The Routening",
'route_desc': "This route was made for testing create endpoint",
'route_type': 1,
'route_url': "http://www.testroute2.com",
'route_color': "FF00FF",
'route_text_color': "00FF00",
}
# patch params
patch_data = {
'agency__agency_id': 'agency_0',
'route_id': "test_route",
'route_desc': "I have updated just a small part of the route"
}
def test_put(self):
data = self.Meta.put_data
data['agency'] = Agency.objects.filter(project=self.project, agency_id=data['agency__agency_id'])[0].id
super().test_put()
def test_create(self):
data = self.Meta.create_data
data['agency'] = Agency.objects.filter(project=self.project, agency_id=data['agency__agency_id'])[0].id
super().test_create()
class TripTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-trips"
class Meta:
model = Trip
serializer = TripSerializer
initial_size = 5
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
trip_id=data['trip_id'])[0].id
# retrieve params
retrieve_data = {
'trip_id': 'test_trip'
}
# create params
create_data = {
'trip_id': "test_trip_create",
'service_id': 'transantiago',
'trip_headsign': 'TRAN',
'shape': None,
'direction_id': 1,
}
# delete params
delete_data = {
'trip_id': 'test_trip'
}
# put params
put_data = {
'trip_id': "test_trip",
'service_id': 'transantiago',
'trip_headsign': 'TRAN',
'shape': None,
'direction_id': 0,
}
# patch params
patch_data = {
'trip_id': 'test_trip',
'direction_id': 0
}
def test_create(self):
data = self.Meta.create_data
data['route'] = Route.objects.filter(agency__project_id=self.project, route_id='trip_test_route')[0].id
super().test_create()
def test_put(self):
data = self.Meta.put_data
data['route'] = Route.objects.filter(agency__project_id=self.project, route_id='trip_test_route')[0].id
super().test_put()
class StopTimesTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-stoptimes"
def enrich_data(self, data):
test_trip = Trip.objects.filter(project=self.project,
trip_id='trip0')[0].id
test_stop = Stop.objects.filter(project=self.project,
stop_id="stop_0")[0].id
data['stop'] = test_stop
data['trip'] = test_trip
class Meta:
model = StopTime
serializer = StopTimeSerializer
initial_size = 44
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(stop__project_id=project,
trip=data['trip'],
stop=data['stop'],
stop_sequence=data['stop_sequence'])[0].id
# retrieve params
retrieve_data = {
'stop_sequence': 1
}
# create params
create_data = {
'stop_sequence': 12
}
# delete params
delete_data = {
'stop_sequence': 1
}
# put params
put_data = {
'stop_sequence': 1
}
# patch params
patch_data = {
'stop_sequence': 1
}
def test_delete(self):
self.enrich_data(self.Meta.delete_data)
super().test_delete()
def test_retrieve(self):
self.enrich_data(self.Meta.retrieve_data)
super().test_retrieve()
def test_patch(self):
self.enrich_data(self.Meta.patch_data)
super().test_patch()
def test_put(self):
self.enrich_data(self.Meta.put_data)
super().test_put()
def test_create(self):
self.enrich_data(self.Meta.create_data)
super().test_create()
class ShapeTableTest(BaseTableTest):
table_name = 'project-shapes'
def get_id(self, shape_id):
return Shape.objects.filter(project=self.project,
shape_id=shape_id)[0].id
def test_list(self):
with self.assertNumQueries(2):
json_response = self.list(self.project.project_id, self.client, dict())
self.assertEqual(len(json_response), 2)
def test_retrieve(self):
shape_id = 'shape_1'
data = {
'shape_id': shape_id
}
id = self.get_id(shape_id)
with self.assertNumQueries(2):
json_response = self.retrieve(self.project.project_id, id, self.client, dict())
target = Shape.objects.filter(project=self.project, **data)[0]
self.assertEqual(json_response, DetailedShapeSerializer(target).data)
def test_delete(self):
shape_id = 'shape_1'
data = {
'shape_id': shape_id
}
id = self.get_id(shape_id)
# 1 extra query to erase the shapepoints (cascade)
with self.assertNumQueries(5):
json_response = self.delete(self.project.project_id, id, self.client, dict())
self.assertEqual(Shape.objects.filter(**data).count(), 0)
def test_put(self):
shape_id = 'shape_1'
data = {
'shape_id': shape_id,
'points': [
{
"shape_pt_sequence": 1,
"shape_pt_lat": 0,
"shape_pt_lon": 0
},
{
"shape_pt_sequence": 2,
"shape_pt_lat": 0,
"shape_pt_lon": 1
},
{
"shape_pt_sequence": 3,
"shape_pt_lat": 1,
"shape_pt_lon": 1
},
{
"shape_pt_sequence": 4,
"shape_pt_lat": 2,
"shape_pt_lon": 2
}
]
}
id = self.get_id(shape_id)
json_response = self.put(self.project.project_id, id, self.client, data)
data['id'] = json_response['id']
self.assertDictEqual(data, json_response)
def test_patch(self):
shape_id = 'shape_1'
data = {
'shape_id': shape_id
}
id = self.get_id(shape_id)
json_response = self.patch(self.project.project_id, id, self.client, data)
def test_create(self):
shape_id = 'shape_create'
data = {
'shape_id': shape_id,
'points': [
{
"shape_pt_sequence": 1,
"shape_pt_lat": 0,
"shape_pt_lon": 0
},
{
"shape_pt_sequence": 2,
"shape_pt_lat": 0,
"shape_pt_lon": 1
},
{
"shape_pt_sequence": 3,
"shape_pt_lat": 1,
"shape_pt_lon": 1
},
{
"shape_pt_sequence": 4,
"shape_pt_lat": 2,
"shape_pt_lon": 2
}
]
}
json_response = self.create(self.project.project_id, self.client, data)
data['id'] = json_response['id']
self.assertDictEqual(data, json_response)
def test_delete_invalid(self):
id = 123456789
self.delete(self.project.project_id, id, self.client, dict(), status.HTTP_404_NOT_FOUND)
def test_put_invalid(self):
id = 123456789
self.put(self.project.project_id, id, self.client, dict(), status.HTTP_404_NOT_FOUND)
def test_patch_invalid(self):
id = 123456789
self.patch(self.project.project_id, id, self.client, dict(), status.HTTP_404_NOT_FOUND)
def test_retrieve_invalid(self):
id = 123456789
self.retrieve(self.project.project_id, id, self.client, dict(), status.HTTP_404_NOT_FOUND)
class LevelTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-levels"
class Meta:
model = Level
serializer = LevelSerializer
initial_size = 5
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
level_id=data['level_id'],
level_index=data['level_index'])[0].id
# retrieve params
retrieve_data = {
'level_id': 'test_level',
'level_index': 0
}
# create params
create_data = {
'level_id': "test_level_2",
'level_index': 1,
'level_name': "Test Level 2"
}
# delete params
delete_data = {
'level_id': 'test_level',
'level_index': 0
}
# put params
put_data = {
'level_id': "test_level",
'level_index': 0,
'level_name': "New Name"
}
# patch params
patch_data = {
'level_id': "test_level",
'level_index': 0,
'level_name': "New Name2"
}
class CalendarDateTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-calendardates"
class Meta:
model = CalendarDate
serializer = CalendarDateSerializer
initial_size = 2
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
date=data['date'])[0].id
# retrieve params
retrieve_data = {
'date': '2020-09-18'
}
# create params
create_data = {
'date': '2020-09-20',
'exception_type': 200,
'service_id': 'new service id'
}
# delete params
delete_data = {
'date': '2020-09-18'
}
# put params
put_data = {
'date': '2020-09-18',
'exception_type': 100,
'service_id': 'test'
}
# patch params
patch_data = {
'date': '2020-09-18',
'exception_type': 100
}
class PathwayTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-pathways"
class Meta:
model = Pathway
serializer = PathwaySerializer
initial_size = 1
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter_by_project(project.project_id).filter(pathway_id=data['pathway_id'])[0].id
# retrieve params
retrieve_data = {
'pathway_id': 'test_pathway'
}
# create params
create_data = {
'pathway_id': 'test_pathway_created',
'pathway_mode': 10,
'is_bidirectional': False,
'from_stop': 'stop_1',
'to_stop': 'stop_2'
}
# delete params
delete_data = {
'pathway_id': 'test_pathway'
}
# put params
put_data = {
'pathway_id': 'test_pathway',
'pathway_mode': 10,
'is_bidirectional': False,
'from_stop': 'stop_1',
'to_stop': 'stop_2'
}
# patch params
patch_data = {
'pathway_id': 'test_pathway',
'pathway_mode': 1000
}
def enrich_data(self, data):
data.update({
'from_stop': Stop.objects.filter(project=self.project, stop_id='stop_1')[0].id,
'to_stop': Stop.objects.filter(project=self.project, stop_id='stop_2')[0].id
})
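    # enrich_data swaps the human-readable stop ids in the payload for the database
    # primary keys of the referenced Stop rows, which is what the API expects.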
def test_create(self):
data = self.Meta.create_data
self.enrich_data(data)
super().test_create()
def test_put(self):
data = self.Meta.put_data
self.enrich_data(data)
super().test_put()
class TransferTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-transfers"
class Meta:
model = Transfer
serializer = TransferSerializer
initial_size = 1
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(from_stop_id=data['from_stop'],
to_stop_id=data['to_stop'])[0].id
# retrieve params
retrieve_data = {
}
# create params
create_data = {
'type': 1
}
# delete params
delete_data = {
}
# put params
put_data = {
'type': 10
}
# patch params
patch_data = {
'type': 100
}
def existing_data(self, data):
data.update({
'from_stop': Stop.objects.filter(project=self.project, stop_id='stop_1')[0].id,
'to_stop': Stop.objects.filter(project=self.project, stop_id='stop_2')[0].id
})
def new_data(self, data):
data.update({
'from_stop': Stop.objects.filter(project=self.project, stop_id='stop_3')[0].id,
'to_stop': Stop.objects.filter(project=self.project, stop_id='stop_4')[0].id
})
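    # Transfers have no string identifier of their own: rows are addressed by their
    # (from_stop, to_stop) pair, so the helpers above inject the matching Stop primary keys.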
def test_delete(self):
self.existing_data(self.Meta.delete_data)
super().test_delete()
def test_retrieve(self):
self.existing_data(self.Meta.retrieve_data)
super().test_retrieve()
def test_patch(self):
self.existing_data(self.Meta.patch_data)
super().test_patch()
def test_put(self):
self.existing_data(self.Meta.put_data)
super().test_put()
def test_create(self):
self.new_data(self.Meta.create_data)
super().test_create()
class FareAttributeTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-fareattributes"
class Meta:
model = FareAttribute
serializer = FareAttributeSerializer
initial_size = 2
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(project=project,
fare_id=data['fare_id'])[0].id
# retrieve params
retrieve_data = {
'fare_id': 'test_fare_attr'
}
# create params
create_data = {
'fare_id': 'test_fare_attr_created',
'price': 1.0,
'currency_type': 'USD',
'payment_method': 2,
'transfers': 3,
'transfer_duration': 3600,
'agency': 'test_agency'
}
# delete params
delete_data = {
'fare_id': 'test_fare_attr'
}
# put params
put_data = {
'fare_id': 'test_fare_attr',
'price': 1.0,
'currency_type': 'USD',
'payment_method': 2,
'transfers': 3,
'transfer_duration': 3600,
'agency': 'test_agency'
}
# patch params
patch_data = {
'fare_id': 'test_fare_attr',
'transfers': 100
}
def enrich_data(self, data):
data['agency'] = Agency.objects.filter_by_project(self.project).filter(agency_id=data['agency'])[0].id
def test_create(self):
self.enrich_data(self.Meta.create_data)
super().test_create()
def test_put(self):
self.enrich_data(self.Meta.put_data)
super().test_put()
class FrequencyTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-frequencies"
class Meta:
model = Frequency
serializer = FrequencySerializer
initial_size = 4
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(trip__project_id=project,
trip_id=data['trip'],
start_time=data['start_time'])[0].id
# retrieve params
retrieve_data = {
'trip': 'trip0',
'start_time': "00:00",
'end_time': "23:00",
'headway_secs': 600,
'exact_times': 0
}
# create params
create_data = {
'trip': 'trip0',
'start_time': datetime.time(10, 0),
'end_time': datetime.time(22, 0),
'headway_secs': 1200,
'exact_times': 1
}
# delete params
delete_data = {
'trip': 'trip0',
'start_time': "00:00",
'end_time': "23:00",
'headway_secs': 600,
'exact_times': 0
}
# put params
put_data = {
'trip': 'trip0',
'start_time': datetime.time(0, 0),
'end_time': datetime.time(23, 0),
'headway_secs': 200,
'exact_times': 1
}
# patch params
patch_data = {
'trip': 'trip0',
'start_time': '00:00:00',
'headway_secs': 200,
'exact_times': 1
}
def add_foreign_ids(self, data):
if 'trip' in data:
data['trip'] = Trip.objects.filter_by_project(self.project.project_id).filter(trip_id=data['trip'])[0].id
def test_delete(self):
self.add_foreign_ids(self.Meta.delete_data)
super().test_delete()
def test_retrieve(self):
self.add_foreign_ids(self.Meta.retrieve_data)
super().test_retrieve()
def test_patch(self):
self.add_foreign_ids(self.Meta.patch_data)
super().test_patch()
def test_put(self):
self.add_foreign_ids(self.Meta.put_data)
super().test_put()
def test_create(self):
self.add_foreign_ids(self.Meta.create_data)
super().test_create()
class ShapePointTableTest(BaseTableTest, BasicTestSuiteMixin):
table_name = "project-shapepoints"
def add_foreign_ids(self, data):
data['shape'] = Shape.objects \
.filter_by_project(self.project.project_id) \
.filter(shape_id=data['shape'])[0].id
class Meta:
model = ShapePoint
serializer = ShapePointSerializer
initial_size = 10
invalid_id = 123456789
def get_id(self, project, data):
return self.model.objects.filter(shape_id=data['shape'],
shape_pt_sequence=data['shape_pt_sequence'])[0].id
# retrieve params
retrieve_data = {
'shape': 'shape_1',
'shape_pt_sequence': 1,
'shape_pt_lat': 0.0,
'shape_pt_lon': 0.0
}
# create params
create_data = {
'shape': 'shape_1',
'shape_pt_sequence': 100,
'shape_pt_lat': 200.0,
'shape_pt_lon': 30.0
}
# delete params
delete_data = {
'shape': 'shape_1',
'shape_pt_sequence': 1
}
# put params
put_data = {
'shape': 'shape_1',
'shape_pt_sequence': 1,
'shape_pt_lat': 1000.0,
'shape_pt_lon': 100.0
}
# patch params
patch_data = {
'shape': 'shape_1',
'shape_pt_sequence': 1,
'shape_pt_lon': 10000.0
}
def test_delete(self):
self.add_foreign_ids(self.Meta.delete_data)
super().test_delete()
def test_retrieve(self):
self.add_foreign_ids(self.Meta.retrieve_data)
super().test_retrieve()
def test_patch(self):
self.add_foreign_ids(self.Meta.patch_data)
super().test_patch()
def test_put(self):
self.add_foreign_ids(self.Meta.put_data)
super().test_put()
def test_create(self):
self.add_foreign_ids(self.Meta.create_data)
super().test_create()
| 28.978044 | 121 | 0.530789 |
6fc82f20780f8efde737cac1b50149d28d70101c | 5,825 | py | Python | catalyst/contrib/dl/callbacks/wandb.py | vsokhatskyi/catalyst | 8516b9d44546600ad597a1fffdf03b7eb23c5e98 | ["Apache-2.0"] | null | null | null | catalyst/contrib/dl/callbacks/wandb.py | vsokhatskyi/catalyst | 8516b9d44546600ad597a1fffdf03b7eb23c5e98 | ["Apache-2.0"] | null | null | null | catalyst/contrib/dl/callbacks/wandb.py | vsokhatskyi/catalyst | 8516b9d44546600ad597a1fffdf03b7eb23c5e98 | ["Apache-2.0"] | null | null | null |
from typing import Dict, List  # isort:skip
import wandb
from catalyst import utils
from catalyst.core import (
Callback, CallbackNode, CallbackOrder, CallbackScope, State
)
class WandbLogger(Callback):
"""
Logger callback, translates ``state.*_metrics`` to Weights & Biases
Read about Weights & Biases here https://docs.wandb.com/
Example:
.. code-block:: python
from catalyst import dl
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
class Projector(nn.Module):
def __init__(self, input_size):
super().__init__()
self.linear = nn.Linear(input_size, 1)
def forward(self, X):
return self.linear(X).squeeze(-1)
X = torch.rand(16, 10)
y = torch.rand(X.shape[0])
model = Projector(X.shape[1])
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=8)
runner = dl.SupervisedRunner()
runner.train(
model=model,
loaders={
"train": loader,
"valid": loader
},
criterion=nn.MSELoss(),
optimizer=optim.Adam(model.parameters()),
logdir="log_example",
callbacks=[
dl.callbacks.WandbLogger(
project="wandb_logger_example"
)
],
num_epochs=10
)
"""
def __init__(
self,
metric_names: List[str] = None,
log_on_batch_end: bool = False,
log_on_epoch_end: bool = True,
**logging_params,
):
"""
Args:
metric_names (List[str]): list of metric names to log,
if None - logs everything
log_on_batch_end (bool): logs per-batch metrics if set True
log_on_epoch_end (bool): logs per-epoch metrics if set True
**logging_params: any parameters of function `wandb.init`
except `reinit` which is automatically set to `True`
and `dir` which is set to `<logdir>`
"""
super().__init__(
order=CallbackOrder.Logging,
node=CallbackNode.Master,
scope=CallbackScope.Experiment,
)
self.metrics_to_log = metric_names
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
if not (self.log_on_batch_end or self.log_on_epoch_end):
raise ValueError("You have to log something!")
if (self.log_on_batch_end and not self.log_on_epoch_end) \
or (not self.log_on_batch_end and self.log_on_epoch_end):
self.batch_log_suffix = ""
self.epoch_log_suffix = ""
else:
self.batch_log_suffix = "_batch"
self.epoch_log_suffix = "_epoch"
self.logging_params = logging_params
def _log_metrics(
self,
metrics: Dict[str, float],
step: int,
mode: str,
suffix="",
commit=True
):
if self.metrics_to_log is None:
metrics_to_log = sorted(list(metrics.keys()))
else:
metrics_to_log = self.metrics_to_log
def key_locate(key: str):
"""
Wandb uses first symbol _ for it service purposes
because of that fact, we can not send original metric names
Args:
key: metric name
Returns:
formatted metric name
"""
if key.startswith("_"):
return key[1:]
return key
metrics = {
f"{key_locate(key)}/{mode}{suffix}": value
for key, value in metrics.items() if key in metrics_to_log
}
wandb.log(metrics, step=step, commit=commit)
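    # For example, with both batch and epoch logging enabled, a batch metric
    # {"loss": 0.5} from the "train" loader is sent to wandb as {"loss/train_batch": 0.5}.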
def on_stage_start(self, state: State):
"""Initialize Weights & Biases"""
wandb.init(**self.logging_params, reinit=True, dir=str(state.logdir))
def on_stage_end(self, state: State):
"""Finish logging to Weights & Biases"""
wandb.join()
def on_batch_end(self, state: State):
"""Translate batch metrics to Weights & Biases"""
if self.log_on_batch_end:
mode = state.loader_name
metrics_ = state.batch_metrics
self._log_metrics(
metrics=metrics_,
step=state.global_step,
mode=mode,
suffix=self.batch_log_suffix,
commit=True
)
def on_loader_end(self, state: State):
"""Translate loader metrics to Weights & Biases"""
if self.log_on_epoch_end:
mode = state.loader_name
metrics_ = state.loader_metrics
self._log_metrics(
metrics=metrics_,
step=state.global_epoch,
mode=mode,
suffix=self.epoch_log_suffix,
commit=False
)
def on_epoch_end(self, state: State):
"""Translate epoch metrics to Weights & Biases"""
extra_mode = "_base"
splitted_epoch_metrics = utils.split_dict_to_subdicts(
dct=state.epoch_metrics,
prefixes=list(state.loaders.keys()),
extra_key=extra_mode,
)
if self.log_on_epoch_end:
self._log_metrics(
metrics=splitted_epoch_metrics[extra_mode],
step=state.global_epoch,
mode=extra_mode,
suffix=self.epoch_log_suffix,
commit=True
)
| 32.18232 | 77 | 0.543519 |
d460b668d6f6a99194e0b523fe8a2485fde04e85 | 4,100 | py | Python | analize/analize.py | radosz99/allegro-scanner | bfe8ffef7383928000465f39b5cf8800629bfae5 | ["MIT"] | 1 | 2021-07-10T13:11:03.000Z | 2021-07-10T13:11:03.000Z | analize/analize.py | radosz99/allegro-scanner | bfe8ffef7383928000465f39b5cf8800629bfae5 | ["MIT"] | 4 | 2020-04-13T13:40:18.000Z | 2020-04-14T18:51:43.000Z | analize/analize.py | radosz99/allegro-scanner | bfe8ffef7383928000465f39b5cf8800629bfae5 | ["MIT"] | null | null | null |
from data.connection import get_connection
from data.select import select_user_raports_id
from data.select import select_user_offers
from data.select import select_user_raports
from data.select import select_money_from_offer
from data.select import select_money_from_offer_without_mask
from data.select import select_raport_by_date_and_user
from data.select import select_stats_by_raport_id
from data.select import select_sold_items_by_raport_id_and_offer_id
from data.select import select_stats_by_raport_id_and_key
from analize.raport import get_string_date
import matplotlib.pyplot as plt
def convert_data(data):
data_converted=[]
for single_data in data:
data_converted.append(str(single_data).replace('(','').replace(',','').replace(')',''))
return data_converted
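# Illustrative example: the selects return one-element tuples, so
# convert_data([(12,), (34,)]) yields ['12', '34'].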
def get_raports_dates(conn, username):
raports = select_user_raports(conn, username)
raports_array=[]
for single_raport in raports:
raports_array.append(str(single_raport[0]) + '\n' + get_string_date(single_raport))
return raports_array
def get_sales_raport(username, with_mask):
conn = get_connection()
raports = convert_data(select_user_raports_id(conn,username))
offers = convert_data(select_user_offers(conn, username, len(raports)))
raports_array = get_raports_dates(conn, username)
sum_array = []
for i in range(len(raports)):
sum_array.append(0)
for x in range (len(offers)):
        if with_mask:
money_offer_array = select_money_from_offer(conn, offers[x])
else:
money_offer_array = select_money_from_offer_without_mask(conn, offers[x])
if(len(money_offer_array)==0):
continue
sum_current = int(money_offer_array[0][1]*money_offer_array[0][2])
sum_array[0]=sum_array[0]+sum_current
for y in range(len(raports)-1):
try:
diff = money_offer_array[y+1][2]-money_offer_array[y][2]
except IndexError:
diff =0
if(diff<0):
diff=0
try:
sum_offer_raport = int(money_offer_array[y+1][1]*(money_offer_array[y+1][2]-money_offer_array[y][2]))
except IndexError:
sum_offer_raport =0
sum_array[y+1] = sum_array[y+1]+sum_offer_raport+sum_current
sum_current=sum_current+sum_offer_raport
plt.plot(raports_array, sum_array)
plt.grid(True)
    plt.title('Profits for the selected reports')
    plt.xlabel('Report numbers')
    plt.ylabel('Total sales profit [PLN]')
plt.show()
def get_raports_id(username, date1, date2):
    # str.replace returns a new string, so the result has to be reassigned
    date1 = date1.replace(".0", ".")
    date2 = date2.replace(".0", ".")
date1_conv = date1.split(".")
date2_conv = date2.split(".")
raport_1_id = select_raport_by_date_and_user(get_connection(), date1_conv[0], date1_conv[1],date1_conv[2],username)
raport_2_id = select_raport_by_date_and_user(get_connection(), date2_conv[0], date2_conv[1],date2_conv[2],username)
return raport_1_id, raport_2_id
def get_difference_two_raports(username, date1, date2, name):
raports_id = get_raports_id(username, date1, date2)
raports_1_id_conv = remove_redundant_stuff_from_string(raports_id[0])
raports_2_id_conv = remove_redundant_stuff_from_string(raports_id[1])
if(name==''):
offers = select_stats_by_raport_id(get_connection(), raports_2_id_conv)
else:
offers = select_stats_by_raport_id_and_key(get_connection(), raports_2_id_conv, name)
difference=0
#print(offers)
for offer in offers:
items_sold = remove_redundant_stuff_from_string(select_sold_items_by_raport_id_and_offer_id(get_connection(), raports_1_id_conv, offer[0]))
if(int(items_sold)<0):
continue
diff = int(offer[2])-int(items_sold)
if(diff<0):
diff = 0
difference = difference + int(diff*float(offer[1]))
diff = 0
return difference
def remove_redundant_stuff_from_string(string):
    return str(string).replace('(','').replace(',','').replace(')','').replace(']','').replace('[','')
| 41 | 147 | 0.700244 |
09decbdacfb88c5303637a2ab04bc4279f911223 | 587 | py | Python | ml_data/serve.py | mpszumowski/ml_data_rest_app | 4ded44c77316266461cf46b312f6a8eeefe25cf9 | ["MIT"] | null | null | null | ml_data/serve.py | mpszumowski/ml_data_rest_app | 4ded44c77316266461cf46b312f6a8eeefe25cf9 | ["MIT"] | null | null | null | ml_data/serve.py | mpszumowski/ml_data_rest_app | 4ded44c77316266461cf46b312f6a8eeefe25cf9 | ["MIT"] | null | null | null |
import connexion
from cheroot.wsgi import Server
from ml_data.config import rest_cfg
from ml_data.info import __version__, __title__
connexion_app = connexion.FlaskApp(
__name__,
specification_dir='.',
options={
"swagger_ui": True
},
arguments={
'title': __title__,
'version': __version__
}
)
connexion_app.add_api('swagger.yaml')
app = connexion_app.app
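# connexion builds on Flask; `connexion_app.app` exposes the underlying Flask/WSGI
# application so the cheroot server below can serve it directly.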
if __name__ == "__main__":
bind_address = (rest_cfg['address'], int(rest_cfg['port']))
server = Server(bind_address, app)
print('starting server...')
server.start()
| 22.576923 | 63 | 0.678024 |
2bec12eea91f2a1e01816664f0c8f163adb549d6 | 1,950 | py | Python | project3/analysis.py | DonDzundza/Berkeley-AI-Course-Projects | 3712d96655292e62e4d0ad25792ae19c48618b2b | ["MIT"] | null | null | null | project3/analysis.py | DonDzundza/Berkeley-AI-Course-Projects | 3712d96655292e62e4d0ad25792ae19c48618b2b | ["MIT"] | null | null | null | project3/analysis.py | DonDzundza/Berkeley-AI-Course-Projects | 3712d96655292e62e4d0ad25792ae19c48618b2b | ["MIT"] | null | null | null |
# analysis.py
# -----------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero ([email protected]) and Dan Klein ([email protected]).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
######################
# ANALYSIS QUESTIONS #
######################
# Set the given parameters to obtain the specified policies through
# value iteration.
def question2():
answerDiscount = 0.9
answerNoise = 0.0
return answerDiscount, answerNoise
def question3a():
answerDiscount = 0.5
answerNoise = 0.0
answerLivingReward = -2.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3b():
answerDiscount = 0.5
answerNoise = 0.1
answerLivingReward = -1.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3c():
answerDiscount = 0.9
answerNoise = 0.0
answerLivingReward = -2.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3d():
answerDiscount = 0.85
answerNoise = 0.35
answerLivingReward = -1.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3e():
answerDiscount = 1.0
answerNoise = 0.0
answerLivingReward = 1.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question6():
return 'NOT POSSIBLE'
# If not possible, return 'NOT POSSIBLE'
if __name__ == '__main__':
print 'Answers to analysis questions:'
import analysis
for q in [q for q in dir(analysis) if q.startswith('question')]:
response = getattr(analysis, q)()
print ' Question %s:\t%s' % (q, str(response))
| 29.545455 | 78 | 0.715385 |
d40257c6a528d4438e2617ac906f13e0434756ec | 1,766 | py | Python | setup.py | YosaiProject/yosai_dpcache | 85d2c2922165a12ea06315bcbb6a4d6f02729793 | ["Apache-2.0"] | 6 | 2015-11-23T15:25:35.000Z | 2017-02-08T16:40:22.000Z | setup.py | YosaiProject/yosai_dpcache | 85d2c2922165a12ea06315bcbb6a4d6f02729793 | ["Apache-2.0"] | null | null | null | setup.py | YosaiProject/yosai_dpcache | 85d2c2922165a12ea06315bcbb6a4d6f02729793 | ["Apache-2.0"] | 1 | 2019-07-04T09:38:18.000Z | 2019-07-04T09:38:18.000Z |
import os
from setuptools import setup, find_packages, Command
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
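# Because of the cmdclass mapping at the bottom of this file, running
# `python setup.py clean` executes the command above instead of the default clean.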
here = os.path.abspath(os.path.dirname(__file__))
try:
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
except IOError:
VERSION = README = ''
install_requires = [
'yosai',
'redis',
]
setup(
name='yosai_dpcache',
use_scm_version={
'version_scheme': 'post-release',
'local_scheme': 'dirty-tag'
},
description="A caching integration for Yosai",
long_description=README,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Security',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='caching yosai',
author='Darin Gordon',
author_email='[email protected]',
url='http://www.github.com/yosaiproject/yosai_dpcache',
license='Apache License 2.0',
packages=find_packages('.', exclude=['ez_setup', 'test*']),
setup_requires=[
'setuptools_scm >= 1.7.0'
],
install_requires=install_requires,
zip_safe=False,
cmdclass={'clean': CleanCommand}
)
| 26.757576 | 79 | 0.620045 |
6519260ebcd4024b7277e5b11c04ff046f2d817f | 18,018 | py | Python | sdk/python/pulumi_aws/iot/topic_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/iot/topic_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/iot/topic_rule.py | mdop-wh/pulumi-aws | 05bb32e9d694dde1c3b76d440fd2cd0344d23376 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['TopicRule']
class TopicRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cloudwatch_alarm: Optional[pulumi.Input[pulumi.InputType['TopicRuleCloudwatchAlarmArgs']]] = None,
cloudwatch_metric: Optional[pulumi.Input[pulumi.InputType['TopicRuleCloudwatchMetricArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dynamodb: Optional[pulumi.Input[pulumi.InputType['TopicRuleDynamodbArgs']]] = None,
dynamodbv2s: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleDynamodbv2Args']]]]] = None,
elasticsearch: Optional[pulumi.Input[pulumi.InputType['TopicRuleElasticsearchArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
error_action: Optional[pulumi.Input[pulumi.InputType['TopicRuleErrorActionArgs']]] = None,
firehose: Optional[pulumi.Input[pulumi.InputType['TopicRuleFirehoseArgs']]] = None,
iot_analytics: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleIotAnalyticArgs']]]]] = None,
iot_events: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleIotEventArgs']]]]] = None,
kinesis: Optional[pulumi.Input[pulumi.InputType['TopicRuleKinesisArgs']]] = None,
lambda_: Optional[pulumi.Input[pulumi.InputType['TopicRuleLambdaArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
republish: Optional[pulumi.Input[pulumi.InputType['TopicRuleRepublishArgs']]] = None,
s3: Optional[pulumi.Input[pulumi.InputType['TopicRuleS3Args']]] = None,
sns: Optional[pulumi.Input[pulumi.InputType['TopicRuleSnsArgs']]] = None,
sql: Optional[pulumi.Input[str]] = None,
sql_version: Optional[pulumi.Input[str]] = None,
sqs: Optional[pulumi.Input[pulumi.InputType['TopicRuleSqsArgs']]] = None,
step_functions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleStepFunctionArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
## Example Usage
```python
import pulumi
import pulumi_aws as aws
mytopic = aws.sns.Topic("mytopic")
myerrortopic = aws.sns.Topic("myerrortopic")
role = aws.iam.Role("role", assume_role_policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "iot.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
\"\"\")
rule = aws.iot.TopicRule("rule",
description="Example rule",
enabled=True,
sql="SELECT * FROM 'topic/test'",
sql_version="2016-03-23",
sns=aws.iot.TopicRuleSnsArgs(
message_format="RAW",
role_arn=role.arn,
target_arn=mytopic.arn,
),
error_action=aws.iot.TopicRuleErrorActionArgs(
sns=aws.iot.TopicRuleErrorActionSnsArgs(
message_format="RAW",
role_arn=role.arn,
target_arn=myerrortopic.arn,
),
))
iam_policy_for_lambda = aws.iam.RolePolicy("iamPolicyForLambda",
role=role.id,
policy=mytopic.arn.apply(lambda arn: f\"\"\"{{
"Version": "2012-10-17",
"Statement": [
{{
"Effect": "Allow",
"Action": [
"sns:Publish"
],
"Resource": "{arn}"
}}
]
}}
\"\"\"))
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: The description of the rule.
:param pulumi.Input[bool] enabled: Specifies whether the rule is enabled.
:param pulumi.Input[pulumi.InputType['TopicRuleErrorActionArgs']] error_action: Configuration block with error action to be associated with the rule. See the documentation for `cloudwatch_alarm`, `cloudwatch_metric`, `dynamodb`, `dynamodbv2`, `elasticsearch`, `firehose`, `iot_analytics`, `iot_events`, `kinesis`, `lambda`, `republish`, `s3`, `step_functions`, `sns`, `sqs` configuration blocks for further configuration details.
:param pulumi.Input[str] name: The name of the rule.
:param pulumi.Input[str] sql: The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide.
:param pulumi.Input[str] sql_version: The version of the SQL rules engine to use when evaluating the rule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['cloudwatch_alarm'] = cloudwatch_alarm
__props__['cloudwatch_metric'] = cloudwatch_metric
__props__['description'] = description
__props__['dynamodb'] = dynamodb
__props__['dynamodbv2s'] = dynamodbv2s
__props__['elasticsearch'] = elasticsearch
if enabled is None:
raise TypeError("Missing required property 'enabled'")
__props__['enabled'] = enabled
__props__['error_action'] = error_action
__props__['firehose'] = firehose
__props__['iot_analytics'] = iot_analytics
__props__['iot_events'] = iot_events
__props__['kinesis'] = kinesis
__props__['lambda_'] = lambda_
__props__['name'] = name
__props__['republish'] = republish
__props__['s3'] = s3
__props__['sns'] = sns
if sql is None:
raise TypeError("Missing required property 'sql'")
__props__['sql'] = sql
if sql_version is None:
raise TypeError("Missing required property 'sql_version'")
__props__['sql_version'] = sql_version
__props__['sqs'] = sqs
__props__['step_functions'] = step_functions
__props__['tags'] = tags
__props__['arn'] = None
super(TopicRule, __self__).__init__(
'aws:iot/topicRule:TopicRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
cloudwatch_alarm: Optional[pulumi.Input[pulumi.InputType['TopicRuleCloudwatchAlarmArgs']]] = None,
cloudwatch_metric: Optional[pulumi.Input[pulumi.InputType['TopicRuleCloudwatchMetricArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
dynamodb: Optional[pulumi.Input[pulumi.InputType['TopicRuleDynamodbArgs']]] = None,
dynamodbv2s: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleDynamodbv2Args']]]]] = None,
elasticsearch: Optional[pulumi.Input[pulumi.InputType['TopicRuleElasticsearchArgs']]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
error_action: Optional[pulumi.Input[pulumi.InputType['TopicRuleErrorActionArgs']]] = None,
firehose: Optional[pulumi.Input[pulumi.InputType['TopicRuleFirehoseArgs']]] = None,
iot_analytics: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleIotAnalyticArgs']]]]] = None,
iot_events: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleIotEventArgs']]]]] = None,
kinesis: Optional[pulumi.Input[pulumi.InputType['TopicRuleKinesisArgs']]] = None,
lambda_: Optional[pulumi.Input[pulumi.InputType['TopicRuleLambdaArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
republish: Optional[pulumi.Input[pulumi.InputType['TopicRuleRepublishArgs']]] = None,
s3: Optional[pulumi.Input[pulumi.InputType['TopicRuleS3Args']]] = None,
sns: Optional[pulumi.Input[pulumi.InputType['TopicRuleSnsArgs']]] = None,
sql: Optional[pulumi.Input[str]] = None,
sql_version: Optional[pulumi.Input[str]] = None,
sqs: Optional[pulumi.Input[pulumi.InputType['TopicRuleSqsArgs']]] = None,
step_functions: Optional[pulumi.Input[List[pulumi.Input[pulumi.InputType['TopicRuleStepFunctionArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'TopicRule':
"""
Get an existing TopicRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN of the topic rule
:param pulumi.Input[str] description: The description of the rule.
:param pulumi.Input[bool] enabled: Specifies whether the rule is enabled.
:param pulumi.Input[pulumi.InputType['TopicRuleErrorActionArgs']] error_action: Configuration block with error action to be associated with the rule. See the documentation for `cloudwatch_alarm`, `cloudwatch_metric`, `dynamodb`, `dynamodbv2`, `elasticsearch`, `firehose`, `iot_analytics`, `iot_events`, `kinesis`, `lambda`, `republish`, `s3`, `step_functions`, `sns`, `sqs` configuration blocks for further configuration details.
:param pulumi.Input[str] name: The name of the rule.
:param pulumi.Input[str] sql: The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide.
:param pulumi.Input[str] sql_version: The version of the SQL rules engine to use when evaluating the rule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Key-value map of resource tags
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["cloudwatch_alarm"] = cloudwatch_alarm
__props__["cloudwatch_metric"] = cloudwatch_metric
__props__["description"] = description
__props__["dynamodb"] = dynamodb
__props__["dynamodbv2s"] = dynamodbv2s
__props__["elasticsearch"] = elasticsearch
__props__["enabled"] = enabled
__props__["error_action"] = error_action
__props__["firehose"] = firehose
__props__["iot_analytics"] = iot_analytics
__props__["iot_events"] = iot_events
__props__["kinesis"] = kinesis
__props__["lambda_"] = lambda_
__props__["name"] = name
__props__["republish"] = republish
__props__["s3"] = s3
__props__["sns"] = sns
__props__["sql"] = sql
__props__["sql_version"] = sql_version
__props__["sqs"] = sqs
__props__["step_functions"] = step_functions
__props__["tags"] = tags
return TopicRule(resource_name, opts=opts, __props__=__props__)
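    # The property getters below expose the resource's output attributes; each one
    # simply proxies to pulumi.get(self, "<attribute>").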
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN of the topic rule
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="cloudwatchAlarm")
def cloudwatch_alarm(self) -> pulumi.Output[Optional['outputs.TopicRuleCloudwatchAlarm']]:
return pulumi.get(self, "cloudwatch_alarm")
@property
@pulumi.getter(name="cloudwatchMetric")
def cloudwatch_metric(self) -> pulumi.Output[Optional['outputs.TopicRuleCloudwatchMetric']]:
return pulumi.get(self, "cloudwatch_metric")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
The description of the rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def dynamodb(self) -> pulumi.Output[Optional['outputs.TopicRuleDynamodb']]:
return pulumi.get(self, "dynamodb")
@property
@pulumi.getter
def dynamodbv2s(self) -> pulumi.Output[Optional[List['outputs.TopicRuleDynamodbv2']]]:
return pulumi.get(self, "dynamodbv2s")
@property
@pulumi.getter
def elasticsearch(self) -> pulumi.Output[Optional['outputs.TopicRuleElasticsearch']]:
return pulumi.get(self, "elasticsearch")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
Specifies whether the rule is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="errorAction")
def error_action(self) -> pulumi.Output[Optional['outputs.TopicRuleErrorAction']]:
"""
Configuration block with error action to be associated with the rule. See the documentation for `cloudwatch_alarm`, `cloudwatch_metric`, `dynamodb`, `dynamodbv2`, `elasticsearch`, `firehose`, `iot_analytics`, `iot_events`, `kinesis`, `lambda`, `republish`, `s3`, `step_functions`, `sns`, `sqs` configuration blocks for further configuration details.
"""
return pulumi.get(self, "error_action")
@property
@pulumi.getter
def firehose(self) -> pulumi.Output[Optional['outputs.TopicRuleFirehose']]:
return pulumi.get(self, "firehose")
@property
@pulumi.getter(name="iotAnalytics")
def iot_analytics(self) -> pulumi.Output[Optional[List['outputs.TopicRuleIotAnalytic']]]:
return pulumi.get(self, "iot_analytics")
@property
@pulumi.getter(name="iotEvents")
def iot_events(self) -> pulumi.Output[Optional[List['outputs.TopicRuleIotEvent']]]:
return pulumi.get(self, "iot_events")
@property
@pulumi.getter
def kinesis(self) -> pulumi.Output[Optional['outputs.TopicRuleKinesis']]:
return pulumi.get(self, "kinesis")
@property
@pulumi.getter(name="lambda")
def lambda_(self) -> pulumi.Output[Optional['outputs.TopicRuleLambda']]:
return pulumi.get(self, "lambda_")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the rule.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def republish(self) -> pulumi.Output[Optional['outputs.TopicRuleRepublish']]:
return pulumi.get(self, "republish")
@property
@pulumi.getter
def s3(self) -> pulumi.Output[Optional['outputs.TopicRuleS3']]:
return pulumi.get(self, "s3")
@property
@pulumi.getter
def sns(self) -> pulumi.Output[Optional['outputs.TopicRuleSns']]:
return pulumi.get(self, "sns")
@property
@pulumi.getter
def sql(self) -> pulumi.Output[str]:
"""
The SQL statement used to query the topic. For more information, see AWS IoT SQL Reference (http://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) in the AWS IoT Developer Guide.
"""
return pulumi.get(self, "sql")
@property
@pulumi.getter(name="sqlVersion")
def sql_version(self) -> pulumi.Output[str]:
"""
The version of the SQL rules engine to use when evaluating the rule.
"""
return pulumi.get(self, "sql_version")
@property
@pulumi.getter
def sqs(self) -> pulumi.Output[Optional['outputs.TopicRuleSqs']]:
return pulumi.get(self, "sqs")
@property
@pulumi.getter(name="stepFunctions")
def step_functions(self) -> pulumi.Output[Optional[List['outputs.TopicRuleStepFunction']]]:
return pulumi.get(self, "step_functions")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Key-value map of resource tags
"""
return pulumi.get(self, "tags")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.415789 | 437 | 0.636475 |
41700dfd2d919e80a8c7c4411e6ad45ee44d4771 | 1,790 | py | Python | qcodes/tests/legacy/test_generic_formatter.py | simonzihlmann/Qcodes | 6be73df1edc68291c7d0ab9edf2293c1b28d553d | ["MIT"] | 1 | 2021-04-07T08:53:05.000Z | 2021-04-07T08:53:05.000Z | qcodes/tests/legacy/test_generic_formatter.py | M1racleShih/Qcodes | c03029a6968e16379155aadc8b083a02e01876a6 | ["MIT"] | 230 | 2020-08-17T06:08:33.000Z | 2022-03-29T12:06:58.000Z | qcodes/tests/legacy/test_generic_formatter.py | nicholgroup/Qcodes | 6b9701bf469421fcf2ced58f67c01f69eba9d1f4 | ["MIT"] | 4 | 2017-12-11T12:13:41.000Z | 2018-08-01T13:13:04.000Z |
from unittest import TestCase
import numpy as np
import qcodes
import qcodes.measure
from qcodes.data.hdf5_format import HDF5Format, HDF5FormatMetadata
from qcodes.data.gnuplot_format import GNUPlotFormat
from qcodes.data.data_set import load_data
from qcodes.tests.legacy.data_mocks import DataSet2D
#%%
class TestFormatters(TestCase):
def setUp(self):
self.formatters = [GNUPlotFormat, HDF5Format, HDF5FormatMetadata]
self.metadata = {'subdict': {'stringlist': ['P1']}, 'string': 'P1',
'int': 1, 'list': [1, 2], 'numpyarray': np.array([1])}
def test_read_write(self):
for f in self.formatters:
print('test formatter %s' % f)
dataset = DataSet2D(name="test_read_write")
dataset.formatter = f()
dataset.add_metadata(self.metadata)
dataset.write(write_metadata=True)
dataset2 = load_data(dataset.location, formatter=f())
self.assertEqual(list(dataset.arrays.keys()),
list(dataset2.arrays.keys()))
# strings should be read and written identically
self.assertEqual(dataset.metadata['string'],
dataset2.metadata['string'])
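        # Round-trips a DataSet2D through each formatter and checks that array
        # names and string metadata are preserved on reload.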
class TestNoSorting(TestCase):
"""
(WilliamHPNielsen): I'm not too sure where this test belongs... It tests
that parameters with non-sortable keys can be saved using the gnuplot
formatter, so I guess it goes here.
"""
param = qcodes.Parameter(name='mixed_val_mapping_param',
get_cmd=lambda: np.random.randint(1, 3),
val_mapping={1: 1, '2': 2}
)
def test_can_measure(self):
qcodes.measure.Measure(self.param).run()
| 35.098039 | 79 | 0.618994 |
4cd4c59b5d8a1b0f5f2d829f0d65ae95d7a3f460 | 453 | py | Python | genesis/projectmgmt/serializers.py | akshay-web/genesis-app-api | 405c3eca0817819f332ce8df6186f324883c70d1 | ["MIT"] | null | null | null | genesis/projectmgmt/serializers.py | akshay-web/genesis-app-api | 405c3eca0817819f332ce8df6186f324883c70d1 | ["MIT"] | null | null | null | genesis/projectmgmt/serializers.py | akshay-web/genesis-app-api | 405c3eca0817819f332ce8df6186f324883c70d1 | ["MIT"] | null | null | null |
from rest_framework import serializers
from projectmgmt.models import Project, User
class ProjectSerializer(serializers.ModelSerializer):
"""Serializer for Project objects"""
users = serializers.SlugRelatedField(queryset=User.objects.all(), many=True, slug_field="username")
class Meta:
model = Project
fields = ('id', 'users', 'title', 'description', 'status', 'start_date', 'end_date')
        read_only_fields = ('id',)
| 37.75 | 103 | 0.706402 |
eb9d4e6f148f8c98e00b0a55a8601bf2760c27cd | 6,940 | py | Python | mediagoblin/tools/response.py | eliroca/mediagoblin-imported | c4599508b02f2e61df3a97ff314766a62a3e5934 | ["CC0-1.0"] | 1 | 2021-09-21T02:24:43.000Z | 2021-09-21T02:24:43.000Z | mediagoblin/tools/response.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | ["CC0-1.0"] | null | null | null | mediagoblin/tools/response.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | ["CC0-1.0"] | 1 | 2021-09-21T02:25:20.000Z | 2021-09-21T02:25:20.000Z |
# GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import werkzeug.utils
from werkzeug.wrappers import Response as wz_Response
from mediagoblin.tools.template import render_template
from mediagoblin.tools.translate import (lazy_pass_to_ugettext as _,
pass_to_ugettext)
from mediagoblin.db.models import UserBan, User
from datetime import date
class Response(wz_Response):
"""Set default response mimetype to HTML, otherwise we get text/plain"""
default_mimetype = 'text/html'
def render_to_response(request, template, context, status=200, mimetype=None):
"""Much like Django's shortcut.render()"""
return Response(
render_template(request, template, context),
status=status,
mimetype=mimetype)
def render_error(request, status=500, title=_('Oops!'),
err_msg=_('An error occured')):
"""Render any error page with a given error code, title and text body
Title and description are passed through as-is to allow html. Make
sure no user input is contained therein for security reasons. The
description will be wrapped in <p></p> tags.
"""
return Response(render_template(request, 'mediagoblin/error.html',
{'err_code': status, 'title': title, 'err_msg': err_msg}),
status=status)
def render_400(request, err_msg=None):
""" Render a standard 400 page"""
_ = pass_to_ugettext
title = _("Bad Request")
if err_msg is None:
err_msg = _("The request sent to the server is invalid, \
please double check it")
return render_error(request, 400, title, err_msg)
def render_403(request):
"""Render a standard 403 page"""
_ = pass_to_ugettext
title = _('Operation not allowed')
err_msg = _("Sorry Dave, I can't let you do that!</p><p>You have tried "
" to perform a function that you are not allowed to. Have you "
"been trying to delete all user accounts again?")
return render_error(request, 403, title, err_msg)
def render_404(request):
"""Render a standard 404 page."""
_ = pass_to_ugettext
err_msg = _("There doesn't seem to be a page at this address. Sorry!</p>"
"<p>If you're sure the address is correct, maybe the page "
"you're looking for has been moved or deleted.")
return render_error(request, 404, err_msg=err_msg)
def render_user_banned(request):
"""Renders the page which tells a user they have been banned, for how long
and the reason why they have been banned"
"""
user_ban = UserBan.query.get(request.user.id)
if (user_ban.expiration_date is not None and
date.today()>user_ban.expiration_date):
user_ban.delete()
return redirect(request,
'index')
return render_to_response(request,
'mediagoblin/banned.html',
{'reason':user_ban.reason,
'expiration_date':user_ban.expiration_date})
def render_http_exception(request, exc, description):
"""Return Response() given a werkzeug.HTTPException
:param exc: werkzeug.HTTPException or subclass thereof
:description: message describing the error."""
# If we were passed the HTTPException stock description on
# exceptions where we have localized ones, use those:
stock_desc = (description == exc.__class__.description)
if stock_desc and exc.code == 403:
return render_403(request)
elif stock_desc and exc.code == 404:
return render_404(request)
return render_error(request, title='{} {}'.format(exc.code, exc.name),
err_msg=description,
status=exc.code)
def redirect(request, *args, **kwargs):
"""Redirects to an URL, using urlgen params or location string
:param querystring: querystring to be appended to the URL
:param location: If the location keyword is given, redirect to the URL
"""
querystring = kwargs.pop('querystring', None)
# Redirect to URL if given by "location=..."
if 'location' in kwargs:
location = kwargs.pop('location')
else:
location = request.urlgen(*args, **kwargs)
if querystring:
location += querystring
return werkzeug.utils.redirect(location)
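# Illustrative usage (not part of the original module):
#   redirect(request, 'index') builds the target URL through request.urlgen, while
#   redirect(request, location='/u/foo', querystring='?page=2') sends the client to '/u/foo?page=2'.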
def redirect_obj(request, obj):
"""Redirect to the page for the given object.
Requires obj to have a .url_for_self method."""
return redirect(request, location=obj.url_for_self(request.urlgen))
def json_response(serializable, _disable_cors=False, *args, **kw):
'''
Serializes a json objects and returns a werkzeug Response object with the
serialized value as the response body and Content-Type: application/json.
:param serializable: A json-serializable object
Any extra arguments and keyword arguments are passed to the
Response.__init__ method.
'''
response = wz_Response(json.dumps(serializable), *args, content_type='application/json', **kw)
if not _disable_cors:
cors_headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST, GET, OPTIONS',
'Access-Control-Allow-Headers': 'Content-Type, X-Requested-With'}
for key, value in cors_headers.items():
response.headers.set(key, value)
return response
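# For example, json_response({"ok": True}) returns a 200 response whose body is the
# serialised JSON, with Content-Type application/json and permissive CORS headers
# (unless _disable_cors=True is passed).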
def json_error(error_str, status=400, *args, **kwargs):
"""
This is like json_response but takes an error message in and formats
it in {"error": error_str}. This also sets the default HTTP status
code to 400.
"""
return json_response({"error": error_str}, status=status, *args, **kwargs)
def form_response(data, *args, **kwargs):
"""
Responds using application/x-www-form-urlencoded and returns a werkzeug
Response object with the data argument as the body
and 'application/x-www-form-urlencoded' as the Content-Type.
Any extra arguments and keyword arguments are passed to the
Response.__init__ method.
"""
response = wz_Response(
data,
content_type="application/x-www-form-urlencoded",
*args,
**kwargs
)
return response
| 37.112299 | 98 | 0.678818 |
90201ab4c108c6fe161bc3fb619b78903a2bf9e4 | 1,351 | py | Python | tests/components/http/test_real_ip.py | mfrueh/home-assistant | 5d64628b5bf4713016883282fd54de9c7d5089d0 | ["Apache-2.0"] | null | null | null | tests/components/http/test_real_ip.py | mfrueh/home-assistant | 5d64628b5bf4713016883282fd54de9c7d5089d0 | ["Apache-2.0"] | null | null | null | tests/components/http/test_real_ip.py | mfrueh/home-assistant | 5d64628b5bf4713016883282fd54de9c7d5089d0 | ["Apache-2.0"] | 1 | 2021-03-13T18:15:31.000Z | 2021-03-13T18:15:31.000Z |
"""Test real IP middleware."""
import asyncio
from aiohttp import web
from aiohttp.hdrs import X_FORWARDED_FOR
from homeassistant.components.http.real_ip import setup_real_ip
from homeassistant.components.http.const import KEY_REAL_IP
@asyncio.coroutine
def mock_handler(request):
"""Handler that returns the real IP as text."""
return web.Response(text=str(request[KEY_REAL_IP]))
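# setup_real_ip() stores the resolved client address on the request under
# KEY_REAL_IP; the handler above echoes it so each test can assert on that value.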
@asyncio.coroutine
def test_ignore_x_forwarded_for(test_client):
"""Test that we get the IP from the transport."""
app = web.Application()
app.router.add_get('/', mock_handler)
setup_real_ip(app, False)
mock_api_client = yield from test_client(app)
resp = yield from mock_api_client.get('/', headers={
X_FORWARDED_FOR: '255.255.255.255'
})
assert resp.status == 200
text = yield from resp.text()
assert text != '255.255.255.255'
@asyncio.coroutine
def test_use_x_forwarded_for(test_client):
"""Test that we get the IP from the transport."""
app = web.Application()
app.router.add_get('/', mock_handler)
setup_real_ip(app, True)
mock_api_client = yield from test_client(app)
resp = yield from mock_api_client.get('/', headers={
X_FORWARDED_FOR: '255.255.255.255'
})
assert resp.status == 200
text = yield from resp.text()
assert text == '255.255.255.255'
| 27.571429 | 63 | 0.704663 |
dccbb44947a50489876d7376d08a16e8e2f76d4d | 4,912 | py | Python | test/test_symmetries.py | manoelmarques/aqua-chemistry | 931935a1dc3c74d08adb4a4523651b097609630f | ["Apache-2.0"] | null | null | null | test/test_symmetries.py | manoelmarques/aqua-chemistry | 931935a1dc3c74d08adb4a4523651b097609630f | ["Apache-2.0"] | 1 | 2019-05-15T11:01:07.000Z | 2019-05-15T11:01:07.000Z | test/test_symmetries.py | manoelmarques/aqua-chemistry | 931935a1dc3c74d08adb4a4523651b097609630f | ["Apache-2.0"] | 1 | 2022-01-15T15:11:01.000Z | 2022-01-15T15:11:01.000Z |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test of Symmetry UCCSD processing.
"""
import itertools
from test.common import QiskitChemistryTestCase
from qiskit import BasicAer
from qiskit.aqua import QuantumInstance, Operator
from qiskit.aqua.algorithms.adaptive import VQE
from qiskit.aqua.components.optimizers import SLSQP
from qiskit.chemistry import QiskitChemistryError
from qiskit.chemistry.core import Hamiltonian, TransformationType, QubitMappingType
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.aqua_extensions.components.variational_forms import UCCSD
from qiskit.chemistry.aqua_extensions.components.initial_states import HartreeFock
# from qiskit.chemistry import set_qiskit_chemistry_logging
# import logging
class TestSymmetries(QiskitChemistryTestCase):
"""Test for symmetry processing."""
def setUp(self):
super().setUp()
try:
driver = PySCFDriver(atom='Li .0 .0 .0; H .0 .0 1.6',
unit=UnitsType.ANGSTROM,
charge=0,
spin=0,
basis='sto3g')
except QiskitChemistryError:
self.skipTest('PYSCF driver does not appear to be installed')
self.qmolecule = driver.run()
self.core = Hamiltonian(transformation=TransformationType.FULL,
qubit_mapping=QubitMappingType.PARITY,
two_qubit_reduction=True,
freeze_core=True,
orbital_reduction=[])
self.qubit_op, _ = self.core.run(self.qmolecule)
self.symmetries, self.sq_paulis, self.cliffords, self.sq_list = self.qubit_op.find_Z2_symmetries()
self.reference_energy = -7.882096489442
def test_symmetries(self):
labels = [symm.to_label() for symm in self.symmetries]
self.assertSequenceEqual(labels, ['ZIZIZIZI', 'ZZIIZZII'])
def test_sq_paulis(self):
labels = [sq.to_label() for sq in self.sq_paulis]
self.assertSequenceEqual(labels, ['IIIIIIXI', 'IIIIIXII'])
def test_cliffords(self):
self.assertEqual(2, len(self.cliffords))
def test_sq_list(self):
self.assertSequenceEqual(self.sq_list, [1, 2])
def test_tapered_op(self):
# set_qiskit_chemistry_logging(logging.DEBUG)
tapered_ops = []
for coeff in itertools.product([1, -1], repeat=len(self.sq_list)):
tapered_op = Operator.qubit_tapering(self.qubit_op, self.cliffords, self.sq_list, list(coeff))
tapered_ops.append((list(coeff), tapered_op))
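            # With two symmetry qubits (sq_list == [1, 2]) the product over [1, -1]
            # gives four sign assignments, hence four candidate tapered operators.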
smallest_idx = 0 # Prior knowledge of which tapered_op has ground state
the_tapered_op = tapered_ops[smallest_idx][1]
the_coeff = tapered_ops[smallest_idx][0]
optimizer = SLSQP(maxiter=1000)
init_state = HartreeFock(num_qubits=the_tapered_op.num_qubits,
num_orbitals=self.core._molecule_info['num_orbitals'],
qubit_mapping=self.core._qubit_mapping,
two_qubit_reduction=self.core._two_qubit_reduction,
num_particles=self.core._molecule_info['num_particles'],
sq_list=self.sq_list)
var_form = UCCSD(num_qubits=the_tapered_op.num_qubits, depth=1,
num_orbitals=self.core._molecule_info['num_orbitals'],
num_particles=self.core._molecule_info['num_particles'],
active_occupied=None, active_unoccupied=None,
initial_state=init_state,
qubit_mapping=self.core._qubit_mapping,
two_qubit_reduction=self.core._two_qubit_reduction,
num_time_slices=1,
cliffords=self.cliffords, sq_list=self.sq_list,
tapering_values=the_coeff, symmetries=self.symmetries)
algo = VQE(the_tapered_op, var_form, optimizer, 'matrix')
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend=backend)
algo_result = algo.run(quantum_instance)
lines, result = self.core.process_algorithm_result(algo_result)
self.assertAlmostEqual(result['energy'], self.reference_energy, places=6)
| 43.087719 | 106 | 0.64943 |
903323588ab48fa5801bf96a7466acc5d7f44a8b | 4,247 | py | Python | .config/qtile/settings/widgets.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | ["MIT"] | 1 | 2022-03-28T19:11:18.000Z | 2022-03-28T19:11:18.000Z | .config/qtile/settings/widgets.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | ["MIT"] | null | null | null | .config/qtile/settings/widgets.py | q3aql/dotfiles | 92375d2217a7e0ed3a36ed1649bc832ad54753f8 | ["MIT"] | null | null | null |
import psutil
from libqtile import widget
from .theme import colors
from libqtile.widget import base
# Get the icons at https://www.nerdfonts.com/cheat-sheet (you need a Nerd Font)
def base(fg='text', bg='dark'):
return {
'foreground': colors[fg],
'background': colors[bg]
}
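# Illustrative example: base('focus', 'color1') simply resolves to
# {'foreground': colors['focus'], 'background': colors['color1']}.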
def separator():
return widget.Sep(**base(), linewidth=0, padding=5)
def icon(fg='text', bg='dark', fontsize=16, text="?"):
return widget.TextBox(
**base(fg, bg),
fontsize=fontsize,
text=text,
padding=3
)
def powerline(fg="light", bg="dark"):
return widget.TextBox(
**base(fg, bg),
text="", # Icon: nf-oct-triangle_left
fontsize=37,
padding=-2.1
)
def workspaces():
return [
separator(),
widget.GroupBox(
**base(fg='light'),
font='UbuntuMono Nerd Font',
fontsize=15,
#font='Liberation Mono',
#fontsize=14,
margin_y=3,
margin_x=0,
padding_y=8,
padding_x=5,
borderwidth=1,
active=colors['active'],
inactive=colors['inactive'],
rounded=False,
highlight_method='block',
urgent_alert_method='block',
urgent_border=colors['urgent'],
this_current_screen_border=colors['focus'],
this_screen_border=colors['grey'],
other_current_screen_border=colors['dark'],
other_screen_border=colors['dark'],
disable_drag=True
),
separator(),
widget.WindowName(**base(fg='focus'), fontsize=14, padding=5),
separator(),
]
primary_widgets = [
*workspaces(),
separator(),
powerline('color5', 'dark'),
icon(bg="color5", text=' '), # Icon: nf-fa-download
widget.CheckUpdates(
background=colors['color5'],
colour_have_updates=colors['text'],
colour_no_updates=colors['text'],
no_update_string='0',
display_format='{updates}',
update_interval=1800,
#custom_command='checkupdates',
custom_command='bash ~/.config/qtile/checkUpdates.sh',
execute='bash ~/.config/qtile/checkUpdates.sh',
),
powerline('color2', 'color5'),
icon(bg="color2", text=' '), # Icon: nf-fae-chipnf-fae-chip
widget.CPU(
background=colors['color2'],
foreground='222222',
update_interval=1.5,
format='{load_percent}% '
),
widget.ThermalSensor(
background=colors['color2'],
foreground='222222',
update_interval=2.0,
tag_sensor="Tctl",
),
powerline('color3', 'color2'),
icon(bg="color3", text=' '), # Icon: nf-mdi-memory
widget.Memory(
background=colors['color3'],
foreground='222222',
measure_mem='G',
format='{MemUsed:.0f}{mm}/{MemTotal:.0f}{mm} ',
update_interval=2.0,
),
powerline('color5', 'color3'),
icon(bg="color5", text=' '), # Icon: nf-fa-volume_up
widget.Volume(
background=colors['color5'],
foreground='222222',
channel='Master',
fmt='{}',
update_interval=0.2,
),
powerline('color2', 'color5'),
widget.CurrentLayoutIcon(**base(bg='color2'), scale=0.65),
widget.CurrentLayout(**base(bg='color2'), padding=5),
powerline('color1', 'color2'),
icon(bg="color1", fontsize=17, text=' '), # Icon: nf-mdi-calendar_clock
widget.Clock(**base(bg='color1'), format='%d/%m/%Y %H:%M '),
powerline('dark', 'color1'),
widget.Systray(background=colors['dark'], padding=5),
]
secondary_widgets = [
*workspaces(),
separator(),
powerline('color2', 'dark'),
widget.CurrentLayoutIcon(**base(bg='color2'), scale=0.65),
widget.CurrentLayout(**base(bg='color2'), padding=5),
powerline('color1', 'color2'),
icon(bg="color1", fontsize=17, text=' '), # Icon: nf-mdi-calendar_clock
widget.Clock(**base(bg='color1'), format='%d/%m/%Y %H:%M '),
powerline('dark', 'color1'),
]
widget_defaults = {
'font': 'UbuntuMono Nerd Font Bold',
'fontsize': 14,
'padding': 1,
}
extension_defaults = widget_defaults.copy()
| 25.431138 | 79 | 0.572169 |
0f6e2b6a441114fb254b13f9b0a18fd4ddf1148d | 17,221 | py | Python | chives/consensus/multiprocess_validation.py | zcomputerwiz/chivesv2-blockchain | f36235e16a348f7fe3e7f9cdbc92615f391a877b | ["Apache-2.0"] | null | null | null | chives/consensus/multiprocess_validation.py | zcomputerwiz/chivesv2-blockchain | f36235e16a348f7fe3e7f9cdbc92615f391a877b | ["Apache-2.0"] | null | null | null | chives/consensus/multiprocess_validation.py | zcomputerwiz/chivesv2-blockchain | f36235e16a348f7fe3e7f9cdbc92615f391a877b | ["Apache-2.0"] | null | null | null |
import asyncio
import logging
import traceback
from concurrent.futures.process import ProcessPoolExecutor
from dataclasses import dataclass
from typing import Dict, List, Optional, Sequence, Tuple, Union, Callable
from chives.consensus.block_header_validation import validate_finished_header_block
from chives.consensus.block_record import BlockRecord
from chives.consensus.blockchain_interface import BlockchainInterface
from chives.consensus.constants import ConsensusConstants
from chives.consensus.cost_calculator import NPCResult
from chives.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from chives.consensus.full_block_to_block_record import block_to_block_record
from chives.consensus.get_block_challenge import get_block_challenge
from chives.consensus.pot_iterations import calculate_iterations_quality, is_overflow_block
from chives.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chives.types.blockchain_format.coin import Coin
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from chives.types.full_block import FullBlock
from chives.types.generator_types import BlockGenerator
from chives.types.header_block import HeaderBlock
from chives.types.unfinished_block import UnfinishedBlock
from chives.util.block_cache import BlockCache
from chives.util.errors import Err, ValidationError
from chives.util.generator_tools import get_block_header, tx_removals_and_additions
from chives.util.ints import uint16, uint64, uint32
from chives.util.streamable import Streamable, dataclass_from_dict, streamable
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PreValidationResult(Streamable):
error: Optional[uint16]
required_iters: Optional[uint64] # Iff error is None
npc_result: Optional[NPCResult] # Iff error is None and block is a transaction block
def batch_pre_validate_blocks(
constants_dict: Dict,
blocks_pickled: Dict[bytes, bytes],
full_blocks_pickled: Optional[List[bytes]],
header_blocks_pickled: Optional[List[bytes]],
prev_transaction_generators: List[Optional[bytes]],
npc_results: Dict[uint32, bytes],
check_filter: bool,
expected_difficulty: List[uint64],
expected_sub_slot_iters: List[uint64],
) -> List[bytes]:
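    # Runs inside a ProcessPoolExecutor worker (see pre_validate_blocks_multiprocessing below),
    # so every input arrives pre-serialized; each block's header is validated against the
    # expected difficulty / sub-slot iterations and one serialized PreValidationResult is
    # returned per block, in the same order.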
blocks: Dict[bytes, BlockRecord] = {}
for k, v in blocks_pickled.items():
blocks[k] = BlockRecord.from_bytes(v)
results: List[PreValidationResult] = []
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
if full_blocks_pickled is not None and header_blocks_pickled is not None:
assert ValueError("Only one should be passed here")
if full_blocks_pickled is not None:
for i in range(len(full_blocks_pickled)):
try:
block: FullBlock = FullBlock.from_bytes(full_blocks_pickled[i])
tx_additions: List[Coin] = []
removals: List[bytes32] = []
npc_result: Optional[NPCResult] = None
if block.height in npc_results:
npc_result = NPCResult.from_bytes(npc_results[block.height])
assert npc_result is not None
if npc_result.npc_list is not None:
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
else:
removals, tx_additions = [], []
if block.transactions_generator is not None and npc_result is None:
prev_generator_bytes = prev_transaction_generators[i]
assert prev_generator_bytes is not None
assert block.transactions_info is not None
block_generator: BlockGenerator = BlockGenerator.from_bytes(prev_generator_bytes)
assert block_generator.program == block.transactions_generator
npc_result = get_name_puzzle_conditions(
block_generator,
min(constants.MAX_BLOCK_COST_CLVM, block.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=True,
)
removals, tx_additions = tx_removals_and_additions(npc_result.npc_list)
header_block = get_block_header(block, tx_additions, removals)
# TODO: address hint error and remove ignore
# error: Argument 1 to "BlockCache" has incompatible type "Dict[bytes, BlockRecord]"; expected
# "Dict[bytes32, BlockRecord]" [arg-type]
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks), # type: ignore[arg-type]
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int: Optional[uint16] = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, npc_result))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
elif header_blocks_pickled is not None:
for i in range(len(header_blocks_pickled)):
try:
header_block = HeaderBlock.from_bytes(header_blocks_pickled[i])
# TODO: address hint error and remove ignore
# error: Argument 1 to "BlockCache" has incompatible type "Dict[bytes, BlockRecord]"; expected
# "Dict[bytes32, BlockRecord]" [arg-type]
required_iters, error = validate_finished_header_block(
constants,
BlockCache(blocks), # type: ignore[arg-type]
header_block,
check_filter,
expected_difficulty[i],
expected_sub_slot_iters[i],
)
error_int = None
if error is not None:
error_int = uint16(error.code.value)
results.append(PreValidationResult(error_int, required_iters, None))
except Exception:
error_stack = traceback.format_exc()
log.error(f"Exception: {error_stack}")
results.append(PreValidationResult(uint16(Err.UNKNOWN.value), None, None))
return [bytes(r) for r in results]
async def pre_validate_blocks_multiprocessing(
constants: ConsensusConstants,
constants_json: Dict,
block_records: BlockchainInterface,
blocks: Sequence[Union[FullBlock, HeaderBlock]],
pool: ProcessPoolExecutor,
check_filter: bool,
npc_results: Dict[uint32, NPCResult],
get_block_generator: Optional[Callable],
batch_size: int,
wp_summaries: Optional[List[SubEpochSummary]] = None,
) -> Optional[List[PreValidationResult]]:
"""
This method must be called under the blockchain lock
If all the full blocks pass pre-validation, (only validates header), returns the list of required iters.
if any validation issue occurs, returns False.
Args:
check_filter:
constants_json:
pool:
constants:
block_records:
blocks: list of full blocks to validate (must be connected to current chain)
npc_results
get_block_generator
"""
prev_b: Optional[BlockRecord] = None
# Collects all the recent blocks (up to the previous sub-epoch)
recent_blocks: Dict[bytes32, BlockRecord] = {}
recent_blocks_compressed: Dict[bytes32, BlockRecord] = {}
num_sub_slots_found = 0
num_blocks_seen = 0
# log.info("############################################ 159")
# log.info(blocks)
if blocks[0].height > 0:
if not block_records.contains_block(blocks[0].prev_header_hash):
return [PreValidationResult(uint16(Err.INVALID_PREV_BLOCK_HASH.value), None, None)]
curr = block_records.block_record(blocks[0].prev_header_hash)
num_sub_slots_to_look_for = 3 if curr.overflow else 2
while (
curr.sub_epoch_summary_included is None
or num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS
or num_sub_slots_found < num_sub_slots_to_look_for
) and curr.height > 0:
if num_blocks_seen < constants.NUMBER_OF_TIMESTAMPS or num_sub_slots_found < num_sub_slots_to_look_for:
recent_blocks_compressed[curr.header_hash] = curr
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
num_sub_slots_found += len(curr.finished_challenge_slot_hashes)
recent_blocks[curr.header_hash] = curr
if curr.is_transaction_block:
num_blocks_seen += 1
curr = block_records.block_record(curr.prev_hash)
recent_blocks[curr.header_hash] = curr
recent_blocks_compressed[curr.header_hash] = curr
block_record_was_present = []
for block in blocks:
block_record_was_present.append(block_records.contains_block(block.header_hash))
# log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 187")
# log.info(blocks)
diff_ssis: List[Tuple[uint64, uint64]] = []
for block in blocks:
if block.height != 0:
assert block_records.contains_block(block.prev_header_hash)
if prev_b is None:
prev_b = block_records.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
constants, len(block.finished_sub_slots) > 0, prev_b, block_records
)
overflow = is_overflow_block(constants, block.reward_chain_block.signage_point_index)
challenge = get_block_challenge(constants, block, BlockCache(recent_blocks), prev_b is None, overflow, False)
if block.reward_chain_block.challenge_chain_sp_vdf is None:
cc_sp_hash: bytes32 = challenge
else:
cc_sp_hash = block.reward_chain_block.challenge_chain_sp_vdf.output.get_hash()
q_str: Optional[bytes32] = block.reward_chain_block.proof_of_space.verify_and_get_quality_string(
constants, challenge, cc_sp_hash
)
# log.info("!!q_str 209")
# log.info(q_str)
if q_str is None:
for i, block_i in enumerate(blocks):
if not block_record_was_present[i] and block_records.contains_block(block_i.header_hash):
block_records.remove_block_record(block_i.header_hash)
return None
required_iters: uint64 = calculate_iterations_quality(
constants.DIFFICULTY_CONSTANT_FACTOR,
q_str,
block.reward_chain_block.proof_of_space.size,
difficulty,
cc_sp_hash,
)
block_rec = block_to_block_record(
constants,
block_records,
required_iters,
block,
None,
)
if block_rec.sub_epoch_summary_included is not None and wp_summaries is not None:
idx = int(block.height / constants.SUB_EPOCH_BLOCKS) - 1
next_ses = wp_summaries[idx]
if not block_rec.sub_epoch_summary_included.get_hash() == next_ses.get_hash():
log.error("sub_epoch_summary does not match wp sub_epoch_summary list")
return None
# Makes sure to not override the valid blocks already in block_records
if not block_records.contains_block(block_rec.header_hash):
block_records.add_block_record(block_rec) # Temporarily add block to dict
recent_blocks[block_rec.header_hash] = block_rec
recent_blocks_compressed[block_rec.header_hash] = block_rec
else:
recent_blocks[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
recent_blocks_compressed[block_rec.header_hash] = block_records.block_record(block_rec.header_hash)
prev_b = block_rec
diff_ssis.append((difficulty, sub_slot_iters))
# log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 250")
# log.info(blocks)
block_dict: Dict[bytes32, Union[FullBlock, HeaderBlock]] = {}
for i, block in enumerate(blocks):
block_dict[block.header_hash] = block
if not block_record_was_present[i]:
block_records.remove_block_record(block.header_hash)
# log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 258")
# log.info(blocks)
recent_sb_compressed_pickled = {bytes(k): bytes(v) for k, v in recent_blocks_compressed.items()}
npc_results_pickled = {}
for k, v in npc_results.items():
npc_results_pickled[k] = bytes(v)
futures = []
# log.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 265")
# log.info(blocks)
# Pool of workers to validate blocks concurrently
for i in range(0, len(blocks), batch_size):
end_i = min(i + batch_size, len(blocks))
blocks_to_validate = blocks[i:end_i]
if any([len(block.finished_sub_slots) > 0 for block in blocks_to_validate]):
final_pickled = {bytes(k): bytes(v) for k, v in recent_blocks.items()}
else:
final_pickled = recent_sb_compressed_pickled
b_pickled: Optional[List[bytes]] = None
hb_pickled: Optional[List[bytes]] = None
previous_generators: List[Optional[bytes]] = []
for block in blocks_to_validate:
# We ONLY add blocks which are in the past, based on header hashes (which are validated later) to the
# prev blocks dict. This is important since these blocks are assumed to be valid and are used as previous
# generator references
prev_blocks_dict: Dict[uint32, Union[FullBlock, HeaderBlock]] = {}
curr_b: Union[FullBlock, HeaderBlock] = block
while curr_b.prev_header_hash in block_dict:
curr_b = block_dict[curr_b.prev_header_hash]
prev_blocks_dict[curr_b.header_hash] = curr_b
if isinstance(block, FullBlock):
assert get_block_generator is not None
if b_pickled is None:
b_pickled = []
b_pickled.append(bytes(block))
try:
block_generator: Optional[BlockGenerator] = await get_block_generator(block, prev_blocks_dict)
except ValueError:
return None
if block_generator is not None:
previous_generators.append(bytes(block_generator))
else:
previous_generators.append(None)
else:
if hb_pickled is None:
hb_pickled = []
hb_pickled.append(bytes(block))
futures.append(
asyncio.get_running_loop().run_in_executor(
pool,
batch_pre_validate_blocks,
constants_json,
final_pickled,
b_pickled,
hb_pickled,
previous_generators,
npc_results_pickled,
check_filter,
[diff_ssis[j][0] for j in range(i, end_i)],
[diff_ssis[j][1] for j in range(i, end_i)],
)
)
# Collect all results into one flat list
return [
PreValidationResult.from_bytes(result)
for batch_result in (await asyncio.gather(*futures))
for result in batch_result
]
def _run_generator(
constants_dict: bytes,
unfinished_block_bytes: bytes,
block_generator_bytes: bytes,
) -> Tuple[Optional[Err], Optional[bytes]]:
"""
Runs the CLVM generator from bytes inputs. This is meant to be called under a ProcessPoolExecutor, in order to
validate the heavy parts of a block (clvm program) in a different process.
"""
try:
constants: ConsensusConstants = dataclass_from_dict(ConsensusConstants, constants_dict)
unfinished_block: UnfinishedBlock = UnfinishedBlock.from_bytes(unfinished_block_bytes)
assert unfinished_block.transactions_info is not None
block_generator: BlockGenerator = BlockGenerator.from_bytes(block_generator_bytes)
assert block_generator.program == unfinished_block.transactions_generator
npc_result: NPCResult = get_name_puzzle_conditions(
block_generator,
min(constants.MAX_BLOCK_COST_CLVM, unfinished_block.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=False,
)
if npc_result.error is not None:
return Err(npc_result.error), None
except ValidationError as e:
return e.code, None
except Exception:
return Err.UNKNOWN, None
return None, bytes(npc_result)
| 46.669377 | 117 | 0.65153 |
fe251f9a1033323d1df7bec37d3063d1c868369d | 4,432 | py | Python | dlxtools/pipelines.py | Datalytyx-Data-Science/dlxds | bed58f764eeef689f9921fed976f07bfc6d8439e | [
"MIT"
] | 1 | 2018-10-14T12:52:51.000Z | 2018-10-14T12:52:51.000Z | dlxtools/pipelines.py | Datalytyx-Data-Science/dlxds | bed58f764eeef689f9921fed976f07bfc6d8439e | [
"MIT"
] | null | null | null | dlxtools/pipelines.py | Datalytyx-Data-Science/dlxds | bed58f764eeef689f9921fed976f07bfc6d8439e | [
"MIT"
] | null | null | null | """
This is a pipelines module
Use for creating and saving commonly used pipelines.
"""
#===========================================================================================
#Imports
#===========================================================================================
import numpy as np
import pandas as pd
from sklearn.externals.joblib import Parallel, delayed
from sklearn.pipeline import FeatureUnion, _fit_transform_one, _transform_one
from scipy import sparse
#===========================================================================================
#Pipeline construction tools
#===========================================================================================
class PandasFeatureUnion(FeatureUnion):
"""
A DataFrame estimator that applies a list of transformer objects in parallel to the input data,
then concatenates the results. This is useful to combine several feature extraction mechanisms
into a single transformer.
Parameters
----------
transformer_list : list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first half of each tuple
is the name of the transformer.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel. None means 1 unless in a joblib.parallel_backend context.
-1 means using all processors. See Glossary for more details.
transformer_weights : dict, optional
Multiplicative weights for features per transformer. Keys are transformer names,
values the weights.
Authors
-------
    Original implementation:
Blog: https://zablo.net/blog/post/pandas-dataframe-in-scikit-learn-feature-union
Github: https://github.com/marrrcin/pandas-feature-union
"""
def fit_transform(self, X, y=None, **fit_params):
"""
Fit all transformers, transform the data and concatenate results.
Parameters
----------
X : Pandas DataFrame only
Input data to be transformed.
y : array-like, shape (n_samples, ...), optional
Targets for supervised learning.
Returns
-------
X_t : Pandas DataFrame
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
self._validate_transformers()
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, X, y, weight,
**fit_params)
for name, trans, weight in self._iter())
if not result:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = self.merge_dataframes_by_column(Xs)
return Xs
def merge_dataframes_by_column(self, Xs):
return pd.concat(Xs, axis="columns", copy=False)
def transform(self, X):
"""
Transform X separately by each transformer, concatenate results.
Parameters
----------
X : Pandas DataFrame only
Input data to be transformed.
Returns
-------
X_t : Pandas DataFrame Only
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
            # Keep the argument order consistent with the _fit_transform_one call in
            # fit_transform above (newer scikit-learn private-helper signature).
            delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = self.merge_dataframes_by_column(Xs)
return Xs
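# --- Editor sketch, not part of the original module: minimal usage example. ---
# Assumes a scikit-learn version whose private pipeline helpers match the calls above
# (roughly the 0.20 era) and transformers that return pandas DataFrames, since the
# concat-based merge only applies when no output is sparse. ColumnSelector is a toy
# transformer written purely for this demo.
if __name__ == '__main__':
    from sklearn.base import BaseEstimator, TransformerMixin
    class ColumnSelector(BaseEstimator, TransformerMixin):
        """Keep a subset of DataFrame columns, preserving the DataFrame type."""
        def __init__(self, columns):
            self.columns = columns
        def fit(self, X, y=None):
            return self
        def transform(self, X):
            return X[self.columns]
    demo = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
    union = PandasFeatureUnion([('ab', ColumnSelector(['a', 'b'])),
                                ('c', ColumnSelector(['c']))])
    print(union.fit_transform(demo))  # still a DataFrame, not a numpy array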
#===========================================================================================
#Custom Prebuilt Transformers
#===========================================================================================
| 31.432624 | 99 | 0.518051 |
f975e8ccbeb0ec6b56ba919e9a749a899e956162 | 9,000 | py | Python | docs/conf.py | anduingaiden/newrelic-api | 5a13af2a69ee9bdb1b54192fdd6589e8424345d8 | [
"MIT"
] | 41 | 2015-05-21T03:16:31.000Z | 2021-10-06T07:05:01.000Z | docs/conf.py | anduingaiden/newrelic-api | 5a13af2a69ee9bdb1b54192fdd6589e8424345d8 | [
"MIT"
] | 17 | 2015-01-07T19:35:21.000Z | 2020-06-15T15:20:13.000Z | docs/conf.py | anduingaiden/newrelic-api | 5a13af2a69ee9bdb1b54192fdd6589e8424345d8 | [
"MIT"
] | 31 | 2016-01-04T09:51:04.000Z | 2021-09-01T19:36:19.000Z | # -*- coding: utf-8 -*-
#
# newrelic-api documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 24 11:53:02 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = '../newrelic_api/version.py'
mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', open(VERSION_FILE, 'rt').read(), re.M)
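    # e.g. this matches a module-level line such as: __version__ = '1.2.3'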
if mo:
return mo.group(1)
else:
raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
#'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'toc'
# General information about the project.
project = 'newrelic-api'
copyright = '2014, Micah Hausler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'newrelic-apidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'newrelic-api.tex', u'newrelic-api Documentation',
u'Micah Hausler', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
'index', 'newrelic-api', 'newrelic-api Documentation',
['Micah Hausler'], 1
)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
'index', 'newrelic-api', 'newrelic-api Documentation',
'Micah Hausler', 'newrelic-api', 'One line description of project.',
'Miscellaneous'
)]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('http://docs.python.org/3.4', None),
}
| 31.468531 | 96 | 0.712 |
64aa859fa62ab7cbe863d67efb8575737bd78948 | 1,633 | py | Python | vertica_python/vertica/messages/frontend_messages/copy_error.py | HirniMeshram/vertica-python | 1b308d151794b2e962e122ead15a21aec4abc3a0 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/messages/frontend_messages/copy_error.py | HirniMeshram/vertica-python | 1b308d151794b2e962e122ead15a21aec4abc3a0 | [
"Apache-2.0"
] | null | null | null | vertica_python/vertica/messages/frontend_messages/copy_error.py | HirniMeshram/vertica-python | 1b308d151794b2e962e122ead15a21aec4abc3a0 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
CopyError message
In the copy-local protocol, the frontend can terminate the cycle by sending a
CopyError message, which will cause the COPY SQL statement to fail with an error.
"""
from __future__ import print_function, division, absolute_import
from struct import pack
from ..message import BulkFrontendMessage
class CopyError(BulkFrontendMessage):
message_id = b'e'
def __init__(self, error_msg, stack_trace=None):
BulkFrontendMessage.__init__(self)
self.error_msg = error_msg.encode('utf-8')
self.file_name = stack_trace[0].encode('utf-8') if stack_trace else b''
self.line_number = stack_trace[1] if stack_trace else 0
self.func_name = stack_trace[2].encode('utf-8') if stack_trace else b''
def read_bytes(self):
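        # Wire layout ('!' = big-endian): file_name bytes + NUL pad, uint32 line_number,
        # func_name bytes + NUL pad, error_msg bytes + NUL pad ('x' is a zero pad byte).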
bytes_ = pack('!{0}sxI{1}sx{2}sx'.format(
len(self.file_name), len(self.func_name), len(self.error_msg)),
self.file_name, self.line_number, self.func_name, self.error_msg)
return bytes_
| 37.113636 | 87 | 0.71831 |
64366273daf3829cf0afaece791eee823c7a305a | 499 | py | Python | 25/02/property_deco.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | 25/02/property_deco.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | 46 | 2017-06-30T22:19:07.000Z | 2017-07-31T22:51:31.000Z | 25/02/property_deco.py | pylangstudy/201707 | c1cc72667f1e0b6e8eef4ee85067d7fa4ca500b6 | [
"CC0-1.0"
] | null | null | null | #class property(fget=None, fset=None, fdel=None, doc=None)
class A:
def __init__(self): self.__x = None
@property
def x(self): print('getx'); return self.__x
@x.setter
def x(self, v): print('setx'); self.__x = v
@x.deleter
def x(self): print('delx'); del self.__x
a = A()
a.x = 100
print(a.x)
del a.x
# * Hard to follow
# * The getter uses @property, yet the setter and deleter have to be written as <getter name>.setter / <getter name>.deleter
# * The same name must be repeated many times
# *   (as the variable name, the function name, and the annotation)
# * Couldn't this be implemented as simply as `x = { get; set; del; }`?
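# --- Editor sketch: the non-decorator property(fget, fset, fdel, doc) form noted at the top. ---
# Class B exists only for this illustration; it wires the same three methods in one line.
class B:
    def __init__(self): self.__x = None
    def getx(self): print('getx'); return self.__x
    def setx(self, v): print('setx'); self.__x = v
    def delx(self): print('delx'); del self.__x
    x = property(getx, setx, delx, 'x is managed by getx/setx/delx')
b = B()
b.x = 200
print(b.x)
del b.x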
| 23.761905 | 64 | 0.627255 |
e3f57ad6cf68a54739cf5742d9ad5dd02c09c576 | 3,702 | py | Python | web/JPS_EMISSIONS/python/AvoidanceCostPlotter.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | web/JPS_EMISSIONS/python/AvoidanceCostPlotter.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | web/JPS_EMISSIONS/python/AvoidanceCostPlotter.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z | import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import sys
import json
#from caresjpsutil import PythonLogger
AVOIDANCE_COST_CSV = 'data/input/CO2_avoidance_cost_{}_carbon.csv'
AVOIDANCE_COST_PNG = '/images/{}_carbon_avoidance_cost.png'
if __name__ == "__main__":
#pythonLogger = PythonLogger('AvoidanceCostPlotter.py')
#pythonLogger.postInfoToLogServer('start of AvoidanceCostPlotter.py')
try:
rootPath = json.loads(sys.argv[1])
pathsDict = {}
x = 0
for carbonPrice in [0, 10, 20, 50]:
# ### get Xs and Ys
# load the powerplant database
df = pd.read_csv(rootPath + AVOIDANCE_COST_CSV.format(carbonPrice),
header='infer', sep=',')
if carbonPrice == 0:
x = df.loc[:, ('year')].values
elif carbonPrice == 20 or carbonPrice == 50:
df = df.clip(lower=0)
y_pc_l = df.loc[:, ('PC_avoid_cost_low')].values
y_pc_m = df.loc[:, ('PC_avoid_cost_middle')].values
y_pc_h = df.loc[:, ('PC_avoid_cost_high')].values
y_ngcc_l = df.loc[:, ('NGCC_avoid_cost_low')].values
y_ngcc_m = df.loc[:, ('NGCC_avoid_cost_middle')].values
y_ngcc_h = df.loc[:, ('NGCC_avoid_cost_high')].values
# ### plot
sns.set_style("white")
sns.set_context("paper", font_scale=1)
plt.clf()
figure, ax = plt.subplots(1, 1, figsize=(3, 2.5))
# plot the PC cost
ax.plot(x, y_pc_m, '-', color='#CC4F1B', label='PC', linewidth=1)
ax.plot(x, y_pc_l, '--', color='#CC4F1B', linewidth=1)
ax.plot(x, y_pc_h, '--', color='#CC4F1B', linewidth=1)
# fill the region between y_pc_l and y_pc_h
ax.fill_between(x, y_pc_h, y_pc_l,
alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')
# plot the NGCC plant cost
ax.plot(x, y_ngcc_m, '-', color='#1B2ACC', label='NGCC',linewidth=1)
ax.plot(x, y_ngcc_l, '--', color='#1B2ACC', linewidth=1)
ax.plot(x, y_ngcc_h, '--', color='#1B2ACC', linewidth=1)
# fill the region between y_ngcc_l and y_ngcc_h
plt.fill_between(x, y_ngcc_h, y_ngcc_l,
alpha=0.5, edgecolor='#1B2ACC', facecolor='#1B2ACC')
ax.legend(loc='best', ncol=1)
# set X and Y coordinates of text according to carbon price
textXCoordinate = 2040 if carbonPrice == 0 else 2027
textYCoordinate = 12 if carbonPrice == 0 else 42
plt.text(textXCoordinate, textYCoordinate,
'Carbon price\n${}/ton'.format(carbonPrice),
bbox=dict(facecolor='grey', alpha=0.5), ha='center', va='top')
ax.set(xlabel='Year', ylabel='CO2 avoidance cost ($/ton)')
ax.set_xlim([2016, 2050])
# set lower range of yticks according to carbon price
ticksYLower = -5 if carbonPrice == 50 else 0
xticks = [2016, 2020, 2030, 2040, 2050]
yticks = np.arange(ticksYLower, 50, 5)
ax.set_xticks(xticks)
ax.set_yticks(yticks)
pathsDict['carbon{}AvoidanceCost'.format(carbonPrice)] = AVOIDANCE_COST_PNG.format(carbonPrice)
plt.savefig(rootPath + 'public' + AVOIDANCE_COST_PNG.format(carbonPrice),
bbox_inches='tight', dpi=800)
print(json.dumps(pathsDict))
except Exception as e:
print(e)
#pythonLogger.postInfoToLogServer('end of AvoidanceCostPlotter.py') | 35.941748 | 107 | 0.573204 |
2019fd8f029bfa40ff5f6bc73ec7deb15db863a9 | 47 | py | Python | src/window/__init__.py | moddedTechnic/Website-Editor | 807222ce3464ab445b6da8a4586b87759300faad | [
"MIT"
] | null | null | null | src/window/__init__.py | moddedTechnic/Website-Editor | 807222ce3464ab445b6da8a4586b87759300faad | [
"MIT"
] | null | null | null | src/window/__init__.py | moddedTechnic/Website-Editor | 807222ce3464ab445b6da8a4586b87759300faad | [
"MIT"
] | null | null | null | from .app import App
from .colour import colour | 23.5 | 26 | 0.808511 |
f81a83a11247496ed297af3f45ee5dc6e447091d | 1,154 | py | Python | website/migrations/0016_auto_20170422_0855.py | sunijsharma/wikitowns | 47d5a895e9df04778f3436b3523a1e68aeacde85 | [
"MIT"
] | 46 | 2018-02-27T18:21:27.000Z | 2022-01-31T23:05:53.000Z | website/migrations/0016_auto_20170422_0855.py | piyushd26/wikitowns | 8480f7e2ce747313b79d356878aeed1ef541b2d9 | [
"MIT"
] | 40 | 2017-02-08T11:42:42.000Z | 2021-12-13T19:46:25.000Z | website/migrations/0016_auto_20170422_0855.py | piyushd26/wikitowns | 8480f7e2ce747313b79d356878aeed1ef541b2d9 | [
"MIT"
] | 7 | 2018-03-01T00:50:05.000Z | 2020-10-01T14:16:57.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-22 08:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('website', '0015_auto_20170419_0905'),
]
operations = [
migrations.AddField(
model_name='bookrecommendation',
name='book_author',
field=models.CharField(default=1, max_length=128),
preserve_default=False,
),
migrations.AddField(
model_name='bookrecommendation',
name='book_description',
field=models.CharField(default=1, max_length=2000),
preserve_default=False,
),
migrations.AddField(
model_name='bookrecommendation',
name='book_url',
field=models.URLField(default=1),
preserve_default=False,
),
migrations.AddField(
model_name='bookrecommendation',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| 28.85 | 74 | 0.604853 |
697fd25337d32b879af5d057239f8fd6beaf8652 | 5,829 | py | Python | touchdown/tests/test_aws_elasticache_cache.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 14 | 2015-01-05T18:18:04.000Z | 2022-02-07T19:35:12.000Z | touchdown/tests/test_aws_elasticache_cache.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 106 | 2015-01-06T00:17:13.000Z | 2019-09-07T00:35:32.000Z | touchdown/tests/test_aws_elasticache_cache.py | yaybu/touchdown | 70ecda5191ce2d095bc074dcb23bfa1584464814 | [
"Apache-2.0"
] | 5 | 2015-01-30T10:18:24.000Z | 2022-02-07T19:35:13.000Z | # Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.stubs.aws import CacheClusterStubber, LaunchConfigurationStubber
class TestCacheClusterCreation(StubberTestCase):
def test_create_cache_cluster(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
cache_cluster.add_create_cache_cluster()
cache_cluster.add_describe_cache_clusters_one_response(status="creating")
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_describe_cache_clusters_one_response()
goal.execute()
def test_create_cache_cluster_idempotent(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_one_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(cache_cluster.resource)), 0)
class TestCacheClusterDeletion(StubberTestCase):
def test_delete_cache_cluster(self):
goal = self.create_goal("destroy")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"destroy",
)
)
)
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_delete_cache_cluster()
# Wait for it to go away
cache_cluster.add_describe_cache_clusters_one_response(status="deleting")
cache_cluster.add_describe_cache_clusters_empty_response()
goal.execute()
def test_delete_cache_cluster_idempotent(self):
goal = self.create_goal("destroy")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"destroy",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(cache_cluster.resource)), 0)
class TestCacheClusterComplications(StubberTestCase):
def test_with_launch_configuration(self):
goal = self.create_goal("apply")
cache_cluster = self.fixtures.enter_context(
CacheClusterStubber(
goal.get_service(
self.aws.add_cache_cluster(
name="my-cache-cluster", instance_class="cache.m3.medium"
),
"apply",
)
)
)
cache_cluster.add_describe_cache_clusters_empty_response()
cache_cluster.add_create_cache_cluster()
cache_cluster.add_describe_cache_clusters_one_response(status="creating")
cache_cluster.add_describe_cache_clusters_one_response()
cache_cluster.add_describe_cache_clusters_one_response()
launch_config = self.fixtures.enter_context(
LaunchConfigurationStubber(
goal.get_service(
self.aws.add_launch_configuration(
name="my-test-lc",
image="ami-cba130bc",
instance_type="t2.micro",
json_user_data={
"REDIS_ADDRESS": cache_cluster.resource.endpoint_address,
"REDIS_PORT": cache_cluster.resource.endpoint_port,
},
),
"apply",
)
)
)
user_data = (
'{"REDIS_ADDRESS": "mycacheclu.q68zge.ng.0001.use1devo.elmo-dev.amazonaws.com", '
'"REDIS_PORT": 6379}'
)
launch_config.add_describe_launch_configurations_empty_response()
launch_config.add_describe_launch_configurations_empty_response()
launch_config.add_create_launch_configuration(user_data=user_data)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
launch_config.add_describe_launch_configurations_one_response(
user_data=user_data
)
goal.execute()
| 36.892405 | 93 | 0.616229 |
2f3167da2169136d6435f46a51a42be05e1b6b97 | 572 | py | Python | 7) N11 Web Scraping (Name,Link,OldPrice,NewPrice)/n11WebScraping.py | codesigned4/AdvancedPython | 01cb2ca6080dcb3cfd89a9a33c857f64041094b2 | [
"MIT"
] | null | null | null | 7) N11 Web Scraping (Name,Link,OldPrice,NewPrice)/n11WebScraping.py | codesigned4/AdvancedPython | 01cb2ca6080dcb3cfd89a9a33c857f64041094b2 | [
"MIT"
] | null | null | null | 7) N11 Web Scraping (Name,Link,OldPrice,NewPrice)/n11WebScraping.py | codesigned4/AdvancedPython | 01cb2ca6080dcb3cfd89a9a33c857f64041094b2 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
url="https://www.n11.com/bilgisayar/dizustu-bilgisayar"
html=requests.get(url).content
soup=BeautifulSoup(html,"html.parser")
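# Note: the selectors below ("li.column", the "proDetail" block, the trailing "TL" currency
# suffix) are tied to n11.com's laptop-listing markup at the time of writing and will need
# updating if the page structure changes.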
products=soup.find_all("li",{"class":"column"})
for li in products:
name=li.div.a.h3.text.strip()
link=li.div.a.get("href")
oldPrice=li.find("div",{"class":"proDetail"}).find_all("a")[0].text.strip().strip("TL")
newPrice=li.find("div",{"class":"proDetail"}).find_all("a")[1].text.strip().strip("TL")
print(f"name: {name} link: {link} old price: {oldPrice} new price: {newPrice}") | 33.647059 | 91 | 0.674825 |
47825050f5a832d8c9450307d2166c4fc27a1e46 | 14,330 | py | Python | dlkit/abstract_osid/calendaring/record_templates.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 2 | 2018-02-23T12:16:11.000Z | 2020-10-08T17:54:24.000Z | dlkit/abstract_osid/calendaring/record_templates.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 87 | 2017-04-21T18:57:15.000Z | 2021-12-13T19:43:57.000Z | dlkit/abstract_osid/calendaring/record_templates.py | UOC/dlkit | a9d265db67e81b9e0f405457464e762e2c03f769 | [
"MIT"
] | 1 | 2018-03-01T16:44:25.000Z | 2018-03-01T16:44:25.000Z | """Implementations of calendaring abstract base class records."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class EventRecord:
"""A record for an ``Event``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventQueryRecord:
"""A record for an ``EventQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventQueryInspectorRecord:
"""A record for an ``EventQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventFormRecord:
"""A record for an ``EventForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventSearchOrderRecord:
"""A record for an ``EventSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventSearchRecord:
"""A record for an ``EventSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class EventSearchResultsRecord:
"""A record for an ``EventSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventRecord:
"""A record for a ``RecurringEvent``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventQueryRecord:
"""A record for a ``RecurringEventQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventQueryInspectorRecord:
"""A record for a ``RecurringEventQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventFormRecord:
"""A record for a ``RecurringEventForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventSearchOrderRecord:
"""A record for a ``RecurringEventSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventSearchRecord:
"""A record for a ``RecurringEventSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class RecurringEventSearchResultsRecord:
"""A record for a ``RecurringEventSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventRecord:
"""A record for a ``SupersedingEvent``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventQueryRecord:
"""A record for a ``SupersedingEventQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventQueryInspectorRecord:
"""A record for a ``SupersedingEventQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventFormRecord:
"""A record for a ``SupersedingEventForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventSearchOrderRecord:
"""A record for a ``SupersedingEventSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventSearchRecord:
"""A record for a ``SupersedingEventSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class SupersedingEventSearchResultsRecord:
"""A record for a ``SupersedingEventSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventRecord:
"""A record for an ``OffsetEvent``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventQueryRecord:
"""A record for an ``OffsetEventQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventQueryInspectorRecord:
"""A record for an ``OffsetEventQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventFormRecord:
"""A record for an ``OffsetEventForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventSearchOrderRecord:
"""A record for an ``OffsetEventSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventSearchRecord:
"""A record for an ``OffsetEventSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class OffsetEventSearchResultsRecord:
"""A record for an ``OffsetEventSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleRecord:
"""A record for a ``Schedule``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleQueryRecord:
"""A record for a ``ScheduleQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleQueryInspectorRecord:
"""A record for a ``ScheduleQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleFormRecord:
"""A record for a ``ScheduleForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSearchOrderRecord:
"""A record for a ``ScheduleSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSearchRecord:
"""A record for a ``ScheduleSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSearchResultsRecord:
"""A record for a ``ScheduleSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotRecord:
"""A record for a ``ScheduleSlot``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotQueryRecord:
"""A record for a ``ScheduleSlotQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotQueryInspectorRecord:
"""A record for a ``ScheduleSlotQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotFormRecord:
"""A record for a ``ScheduleSlotForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotSearchOrderRecord:
"""A record for a ``ScheduleSlotSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotSearchRecord:
"""A record for a ``ScheduleSlotSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class ScheduleSlotSearchResultsRecord:
"""A record for a ``ScheduleSlotSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodRecord:
"""A record for a ``TimePeriod``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodQueryRecord:
"""A record for a ``TimePeriodQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodQueryInspectorRecord:
"""A record for a ``TimePeriodQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodFormRecord:
"""A record for a ``TimePeriodForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodSearchOrderRecord:
"""A record for a ``TimePeriodSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodSearchRecord:
"""A record for a ``TimePeriodSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class TimePeriodSearchResultsRecord:
"""A record for a ``TimePeriodSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentRecord:
"""A record for a ``Commitment``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentQueryRecord:
"""A record for a ``CommitmentQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentQueryInspectorRecord:
"""A record for a ``CommitmentQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentFormRecord:
"""A record for a ``CommitmentForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentSearchOrderRecord:
"""A record for a ``CommitmentSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentSearchRecord:
"""A record for a ``CommitmentSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CommitmentSearchResultsRecord:
"""A record for a ``CommitmentSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarRecord:
"""A record for a ``Calendar``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarQueryRecord:
"""A record for a ``CalendarQuery``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarQueryInspectorRecord:
"""A record for a ``CalendarQueryInspector``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarFormRecord:
"""A record for a ``CalendarForm``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarSearchOrderRecord:
"""A record for a ``CalendarSearchOrder``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarSearchRecord:
"""A record for a ``CalendarSearch``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
class CalendarSearchResultsRecord:
"""A record for a ``CalendarSearchResults``.
The methods specified by the record type are available through the
underlying object.
"""
__metaclass__ = abc.ABCMeta
| 22.148377 | 72 | 0.711305 |
7d725d58297dee000c1e78f489f9be27274700e8 | 739 | py | Python | tests/pyre/constraints/isGreaterEqual.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/constraints/isGreaterEqual.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/constraints/isGreaterEqual.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Exercise "isGreaterEqual"
"""
def test():
import pyre.constraints
constraint = pyre.constraints.isGreaterEqual(value=1)
constraint.validate(1)
constraint.validate(1.1)
constraint.validate(2)
stranger = 0
try:
constraint.validate(stranger)
except constraint.ConstraintViolationError as error:
assert error.constraint == constraint
assert error.value == stranger
return constraint
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
| 17.595238 | 67 | 0.663058 |
ec0d0ed37bebd84232506e18515bbf9494ae64fe | 16,507 | py | Python | femio/fem_attributes.py | ricosjp/femio | f43991132e530c97477374f4bba25a250a6acae8 | [
"Apache-2.0"
] | 21 | 2020-12-27T11:05:35.000Z | 2022-03-02T15:37:18.000Z | femio/fem_attributes.py | ricosjp/femio | f43991132e530c97477374f4bba25a250a6acae8 | [
"Apache-2.0"
] | null | null | null | femio/fem_attributes.py | ricosjp/femio | f43991132e530c97477374f4bba25a250a6acae8 | [
"Apache-2.0"
] | 2 | 2021-04-28T09:41:09.000Z | 2021-07-01T21:18:45.000Z | from pathlib import Path
import numpy as np
from . import config
from .fem_attribute import FEMAttribute
from .fem_elemental_attribute import FEMElementalAttribute
class FEMAttributes:
"""Represents dictionary of FEMAttributes.
Attributes
----------
data: Dict[str, femio.FEMAttribute]
or Dict[str, femio.FEMElementalAttribute]
"""
@classmethod
def load(cls, npz_file_name, **kwargs):
"""Load data from npz file.
Parameters
----------
npz_file: file, str, or pathlib.Path
Npz file.
Returns
-------
FEMAttributes
"""
npz_file_name = Path(npz_file_name)
if not npz_file_name.is_file():
return cls({})
dict_data = np.load(npz_file_name, allow_pickle=True)
return cls.from_dict(dict_data, **kwargs)
@classmethod
def from_dict(cls, dict_data, **kwargs):
"""Create FEMAttributes object from the specified dict_data.
Parameters
----------
dict_data: Dict[str, numpy.ndarray]
Dict mapping from attribute (ID or data) name to its values.
Returns
-------
FEMAttribute
"""
if 'is_elemental' in kwargs and kwargs['is_elemental']:
attribute_class = FEMElementalAttribute
else:
attribute_class = FEMAttribute
split_dict_data = cls._split_dict_data(dict_data)
return cls({
k: attribute_class.from_dict(k, v)
for k, v in split_dict_data.items()}, **kwargs)
@classmethod
def _split_dict_data(cls, dict_data):
unique_attribute_names = np.unique([
k.split('/')[0] for k in dict_data.keys()])
return {
unique_attribute_name:
{
k: v for k, v in dict_data.items()
if unique_attribute_name == k.split('/')[0]}
for unique_attribute_name in unique_attribute_names}
@classmethod
def from_meshio(cls, ids, dict_data, is_elemental=False):
if is_elemental:
elemental_data = {}
for cell_type in dict_data.keys():
for attribute_name, attribute_data in dict_data[
cell_type].items():
if attribute_name not in elemental_data:
elemental_data[attribute_name] = {}
elemental_data[attribute_name].update({
config.DICT_MESHIO_ELEMENT_TO_FEMIO_ELEMENT[cell_type]:
FEMAttribute(attribute_name, ids, attribute_data)})
attributes = {
attribute_name:
FEMElementalAttribute(attribute_name, attribute_data)
for attribute_name, attribute_data in elemental_data.items()}
return cls(attributes, is_elemental=True)
else:
return cls({
k: FEMAttribute(k, ids, v) for k, v in dict_data.items()})
def __init__(
self, attributes=None, names=None, ids=None, list_arrays=None, *,
is_elemental=False):
"""Initialize FEMAttributes object.
Parameters
----------
attributes: List[femio.FEMAttribute] or Dict[str, femio.FEMAttribute],
optional
List of FEMAttributes.
names: List[str], optional
Attribute names.
ids: List[int] or List[str], optional
List of IDs.
list_arrays: List[numpy.ndarray], optional
List of ndarray.
is_elemental: bool, optional
If True, create dict of FEMElementalAttributes instead of
FEMAttributes. The default is False.
"""
self.is_elemental = is_elemental
if self.is_elemental:
self.attribute_class = FEMElementalAttribute
else:
self.attribute_class = FEMAttribute
if attributes is not None:
if isinstance(attributes, dict):
self.data = attributes
else:
self.data = {
attribute.name: attribute for attribute in attributes}
elif ids is not None and list_arrays is not None:
self.data = {
name: self.attribute_class(name, ids=ids, data=data)
for name, data in zip(names, list_arrays)}
else:
raise ValueError('Feed attributes or (names, ids, list_arrays).')
self.material_overwritten = False
return
def __len__(self):
return len(self.data)
def _get_key(self, key):
if key in self.keys():
return key
if key in config.DICT_ALIASES:
return config.DICT_ALIASES[key]
else:
return key
def __contains__(self, item):
return self._get_key(item) in self.data
def __getitem__(self, key):
if isinstance(key, str):
return self.data[self._get_key(key)]
else:
return [self.data[self._get_key(k)] for k in key]
def __setitem__(self, key, value):
if isinstance(key, str):
self.data[self._get_key(key)] = value
else:
for k in key:
self.data[self._get_key(k)] = value
return
    def __delitem__(self, key):
self.pop(self._get_key(key))
return
def get_data_length(self):
lengths = np.array([len(v) for v in self.values()])
if np.all(lengths[0] == lengths):
return lengths[0]
else:
raise ValueError('Data has different lengths')
def get_attribute_ids(self, key, *, mandatory=True):
"""Get IDs of the specified attribute.
Parameters
----------
key: str or List[str]
key to access the data.
mandatory: bool, optional
If True, raise ValueError if no data is found. The default is True.
Returns
-------
data: numpy.ndarray or List[numpy.ndarray]
"""
if isinstance(key, str):
self._handle_key_missing(key, mandatory)
return self[self._get_key(key)].ids
else:
for k in key:
self._handle_key_missing(k, mandatory)
return [d.ids for d in self[self._get_key(key)]]
def get_attribute_data(self, key, *, mandatory=True):
"""Get contents of the specified attribute.
Parameters
----------
key: str or List[str]
key to access the data.
mandatory: bool, optional
If True, raise ValueError if no data is found. The default is True.
Returns
-------
data: numpy.ndarray or List[numpy.ndarray]
"""
if isinstance(key, str):
self._handle_key_missing(key, mandatory)
return self[self._get_key(key)].data
else:
for k in key:
self._handle_key_missing(k, mandatory)
return [d.data for d in self[self._get_key(key)]]
def set_attribute_data(
self, key, data, *, allow_overwrite=False, name=None):
"""Set attribute data.
Parameters
----------
key: str
Key of the new data.
data: numpy.ndarray
            New data which has the same length as those of the existing attributes.
allow_overwrite: bool, optional
If True, allow overwriting existing data. The default is False.
name: str, optional
The name of the new attribute. The default is the same as the key.
"""
if not allow_overwrite and key in self.data:
raise ValueError(
f"Cannot overwrite the existing attribute: {key}.")
if not self.are_same_lengths():
raise ValueError(
"Attributes have various lengths. Specify IDs.")
if name is None:
name = key
ids = list(self.data.values())[0].ids
self[key] = self.attribute_class(name, ids=ids, data=data)
return
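    # A minimal usage sketch (attribute names and shapes below are hypothetical,
    # not taken from the original source):
    #   attrs = FEMAttributes(
    #       names=['initial_temperature'], ids=np.arange(4),
    #       list_arrays=[np.zeros((4, 1))])
    #   attrs.set_attribute_data('boundary_temperature', np.ones((4, 1)))
    #   attrs.get_attribute_data('boundary_temperature')  # -> (4, 1) array of ones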
def are_same_lengths(self):
"""See if the attributes have the same lengths."""
lengths = np.array([len(v.data) for v in self.data.values()])
return np.all(lengths == lengths[0])
def _handle_key_missing(self, key, mandatory):
if self._get_key(key) not in self.data:
if mandatory:
raise ValueError(
f"{self._get_key(key)} not found in "
f"{self.data.keys()}")
else:
return None
def reset(self):
"""Reset data contents.
Parameters
----------
None
Returns
-------
None
"""
self.data = {}
return
def pop(self, key, default=None):
"""Pop data contents.
Parameters
----------
key: str or List[str]
key to access the data.
Returns
-------
data: numpy.ndarray or List[numpy.ndarray]
"""
if isinstance(key, str):
return self.data.pop(self._get_key(key), default)
else:
return [self.data.pop(self._get_key(k), default) for k in key]
def to_dict(self):
"""Convert to dict.
        Parameters
        ----------
        None
Returns
-------
Dict[str, numpy.ndarray]
            Dictionary which maps f"{attribute_name}_ids" or
f"{attribute_name}_data" to data contents.
"""
dict_data = {}
for k, v in self.data.items():
dict_data.update(v.to_dict(prefix=k))
return dict_data
def save(self, file_):
"""Save the contents.
Parameters
----------
file_: file, str, or path.Path
File or file name to which the data is saved.
Returns
-------
None
"""
if len(self) == 0:
return
np.savez(file_, **self.to_dict())
return
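    # Sketch of a save()/load() round trip (the file name is illustrative):
    #   attrs.save('attributes.npz')
    #   restored = FEMAttributes.load('attributes.npz')
    # load() returns an empty FEMAttributes when the file does not exist.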
def update(self, dict_attributes):
"""Update FEMAttribute data with new dictionary.
Parameters
----------
dict_attributes: Dict[str, FEMAttribute] or FEMAttributes
Returns
-------
None
"""
if isinstance(dict_attributes, dict):
for v in dict_attributes.values():
if not isinstance(v, self.attribute_class):
raise ValueError(
f"{v} is not an instance of {self.attribute_class}")
self.data.update(dict_attributes)
elif isinstance(dict_attributes, FEMAttributes):
self.update(dict_attributes.data)
else:
raise ValueError(f"Unknown dict type for: {dict_attributes}")
if self.has_material(dict_attributes):
self.material_overwritten = True
return
def update_time_series(self, list_dict_attributes):
"""Update FEMAttribute data with new dictionary.
Parameters
----------
list_dict_attributes:
List[Dict[str, FEMAttribute]] or List[FEMAttributes]
Returns
-------
None
"""
attribute_names = list(list_dict_attributes[0].keys())
dict_attribute_ids = {
name: list_dict_attributes[0][name].ids
for name in attribute_names}
dict_attribute_data = {
name: self._extract_time_series_data(list_dict_attributes, name)
for name in attribute_names}
dict_attributes = {
name: self.attribute_class(
name, ids=dict_attribute_ids[name],
data=dict_attribute_data[name], silent=True, time_series=True)
for name in attribute_names}
self.update(dict_attributes)
return
def _extract_time_series_data(self, list_dict_attributes, name):
return np.stack([a[name].data for a in list_dict_attributes])
def overwrite(self, name, data, *, ids=None):
"""Overwrite data.
        Parameters
----------
name: str
Attribute name to be overwritten.
data: numpy.ndarray
New data to overwrite with.
ids: numpy.ndarray
IDs for new data.
"""
if name not in self:
raise ValueError(f"{name} not in the data {self.keys()}")
if ids is None:
self[name]._data = data
else:
fem_attribute = FEMAttribute(name, ids=ids, data=data)
self[name] = fem_attribute
if name in config.LIST_MATERIALS:
self.material_overwritten = True
return
def update_data(
self, ids, data_dict,
*, allow_overwrite=False, raise_overwrite=False):
"""Update data with new data_dict.
Parameters
----------
ids: List[str], List[int], str, or int
IDs of FEMAttributes.
data_dict: Dict[str, np.ndarray]
Dictionary of data mapping from property names to property values.
allow_overwrite: bool, optional
            If True, allow overwriting existing rows. The default is False.
raise_overwrite: bool, optional
If True, raise ValueError when one tries to overwrite data.
The default is False.
"""
for attribute_name, attribute_value in data_dict.items():
if attribute_name in self:
if raise_overwrite:
raise ValueError(
f"Tries to overwrite {attribute_name}")
self[attribute_name].update(
ids, attribute_value, allow_overwrite=allow_overwrite)
else:
self[attribute_name] = self.attribute_class(
name=attribute_name, ids=ids, data=attribute_value)
if self.has_material(data_dict):
self.material_overwritten = True
return
def get_n_material(self, fem_attributes=None):
"""Count the number of material properties contained in the
fem_attributes.
Parameters
----------
fem_attributes: FEMAttributes, optional
If not fed, self will be used.
Returns
-------
        n_material: int
            The number of material properties found.
"""
if fem_attributes is None:
fem_attributes = self
return np.sum(np.array([
material_property_name in fem_attributes
for material_property_name in config.LIST_MATERIALS]))
def has_material(self, fem_attributes=None):
"""Check if fem_attributes have materials.
Parameters
----------
fem_attributes: FEMAttributes, optional
If not fed, self will be used.
Returns
-------
has_material: bool
"""
return self.get_n_material(fem_attributes) > 0
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
def extract_dict(self, ids):
"""Extract FEMAttributes data with IDs.
Parameters
----------
ids: List[str], List[int], str, or int
IDs of FEMAttributes to extract.
Returns
-------
extracted_dict: Dict[str, np.ndarray]
Extracted dict mapping from attribute names to attribute values.
"""
return {k: v.loc[ids].values for k, v in self.items()}
def to_meshio(self):
if self.is_elemental:
cell_data = {}
for attribute_name, attribute_data in self.items():
for element_type, attribute in attribute_data.items():
if len(attribute.data.shape) < 3:
if element_type not in cell_data:
cell_data[element_type] = {}
cell_data[element_type].update({
attribute_name: attribute.data})
return cell_data
else:
return {
attribute_name: attribute_data.data
for attribute_name, attribute_data in self.items()
if len(attribute_data.data.shape) < 3}
def filter_with_ids(self, ids):
return FEMAttributes(
{key: value.filter_with_ids(ids) for key, value in self.items()},
is_elemental=self.is_elemental)
| 31.805395 | 79 | 0.559581 |
ccd2c0d4aeb35678653922376973652f48bb2180 | 2,954 | py | Python | quaternion.py | Gillu13/Rubik-solver | 6185dfb072c114adb69068deb239af7bec28a603 | [
"MIT"
] | null | null | null | quaternion.py | Gillu13/Rubik-solver | 6185dfb072c114adb69068deb239af7bec28a603 | [
"MIT"
] | null | null | null | quaternion.py | Gillu13/Rubik-solver | 6185dfb072c114adb69068deb239af7bec28a603 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 29 22:06:08 2014
@author: Gilles Aouizerate
"""
import numpy as np
class quaternion():
"""A simple quaternion class in order to represent a rotation.
To build a quaternion object, one needs to input either the angle and
the unit vector about which the rotation happens or directly the scalar
and vector parts of the quaternion.
Examples
--------
>>> import quaternion as quat
>>> Q1 = quat.quaternion([1.,0.,0.], angl = 90.)
>>> Q2 = quat.quaternion([(0.5)**0.5,0.,0.], W = (0.5)**0.5)
Notes
-----
See Eugene Salamin: "Application of Quaternions to Computation with
Rotations", Working Paper, Stanford AI Lab, 1979.
"""
def __init__(self, vect, **kwargs):
"""Initializes a quaternion object
Parameters
----------
        vect: list of float, depending on kwargs it is either the
            coordinates of the unit vector about which the rotation happens or
            directly the vector part of the quaternion
\**kwargs:
* angl: float, the angle of rotation represented by the quaternion.
            * W: float, the scalar part of the quaternion object.
"""
for name, value in kwargs.items():
if name=='angl':
self.w = np.cos(value/2.*np.pi/180.)
self.x = vect[0]*np.sin(value/2.*np.pi/180.)
self.y = vect[1]*np.sin(value/2.*np.pi/180.)
self.z = vect[2]*np.sin(value/2.*np.pi/180.)
elif name=='W':
self.w = value
self.x = vect[0]
self.y = vect[1]
self.z = vect[2]
self.set_matrix()
def set_matrix(self):
self.matrix = np.zeros([4,4])
self.matrix[0,0] = self.w**2+self.x**2-self.y**2-self.z**2
self.matrix[1,1] = self.w**2-self.x**2+self.y**2-self.z**2
self.matrix[2,2] = self.w**2-self.x**2-self.y**2+self.z**2
self.matrix[0,1] = 2*self.x*self.y-2*self.w*self.z
self.matrix[0,2] = 2*self.x*self.z+2*self.w*self.y
self.matrix[1,0] = 2*self.x*self.y+2*self.w*self.z
self.matrix[1,2] = 2*self.y*self.z-2*self.w*self.x
self.matrix[2,0] = 2*self.x*self.z-2*self.w*self.y
self.matrix[2,1] = 2*self.y*self.z+2*self.w*self.x
self.matrix[3,3] = 1.
def __mul__(self, other):
w1 = self.w
x1 = self.x
y1 = self.y
z1 = self.z
w2 = other.w
x2 = other.x
y2 = other.y
z2 = other.z
w = w1*w2 - x1*x2 - y1*y2 - z1*z2
x = w1*x2 + x1*w2 + y1*z2 - z1*y2
y = w1*y2 - x1*z2 + y1*w2 + z1*x2
z = w1*z2 + x1*y2 - y1*x2 + z1*w2
return quaternion(np.array([x, y, z]), W = w) | 30.453608 | 76 | 0.509817 |
ec4154b6d5befa15d2e915e402ddf57aa226ff17 | 659 | py | Python | configs/HTRPO_FourRoomMaze.py | HTRPOCODES/HTRPO-v2 | 7e085e8077e6caa38d192bbd33b41c49b36ad6a6 | [
"MIT"
] | 7 | 2020-02-24T15:05:20.000Z | 2021-08-24T02:27:13.000Z | configs/HTRPO_FourRoomMaze.py | HTRPOCODES/HTRPO-v2 | 7e085e8077e6caa38d192bbd33b41c49b36ad6a6 | [
"MIT"
] | null | null | null | configs/HTRPO_FourRoomMaze.py | HTRPOCODES/HTRPO-v2 | 7e085e8077e6caa38d192bbd33b41c49b36ad6a6 | [
"MIT"
] | 1 | 2020-07-27T01:43:18.000Z | 2020-07-27T01:43:18.000Z | HTRPOconfig = {
'reward_decay': 0.95,
'max_kl_divergence': 2e-5,
'goal_space': None,
'per_decision': True,
'GAE_lambda': 0.,
'weighted_is': True,
'using_active_goals' : True,
'hidden_layers': [64,64],
'hidden_layers_v': [64,64],
'max_grad_norm': None,
'lr_v': 5e-4,
'iters_v':10,
# for comparison with HPG
'lr': 1e-3,
# NEED TO FOCUS ON THESE PARAMETERS
'using_hpg': False,
'steps_per_iter': 256,
'sampled_goal_num': None,
'value_type': 'FC',
'using_original_data': False,
'using_kl2':True
}
HTRPOconfig['memory_size'] = HTRPOconfig['steps_per_iter']
| 26.36 | 59 | 0.596358 |
daf034dae047d76a27aafe0282ad9f4a42e6910a | 3,432 | py | Python | xue/uniapply/auditing.py | team-xue/xue | e6bd9539803a2bf902f48b65a9df86356b5d46b2 | [
"BSD-3-Clause"
] | 1 | 2015-11-23T02:33:07.000Z | 2015-11-23T02:33:07.000Z | xue/uniapply/auditing.py | team-xue/xue | e6bd9539803a2bf902f48b65a9df86356b5d46b2 | [
"BSD-3-Clause"
] | null | null | null | xue/uniapply/auditing.py | team-xue/xue | e6bd9539803a2bf902f48b65a9df86356b5d46b2 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
__all__ = [
'is_expired',
'is_auditer',
'is_class_applicable',
'get_auditing_status',
'check_target_user',
]
import datetime
from collections import OrderedDict
from itertools import izip
from .models import *
def is_expired(tgt):
return not (tgt.start_date < datetime.datetime.today() < tgt.end_date)
def is_auditer(rules, user):
return any(rule.auditer == user for rule in rules)
def is_class_applicable(tgt, user):
if user.profile.role != 0:
# not a student, hence no associated logical class
return False
# XXX optimize the query
return user.central_info.klass in tgt.allowed_classes.all()
def get_auditing_status(entry):
target = entry.target
rules = AuditingRule.objects.filter(target=target)
# iterate through the rules
prioritized_rules = sorted((rule.niceness, rule, ) for rule in rules)
if not prioritized_rules:
# no rules defined, pass by default...
return 1, OrderedDict(), None
rule_results, statuses = OrderedDict(), []
for niceness, rule in prioritized_rules:
try:
result = AuditOutcome.objects.get(entry=entry, rule=rule)
except AuditOutcome.DoesNotExist:
# final result not known yet, i.e. pending
# add a fake result item to reflect this
result = None
rule_results[rule] = result
statuses.append(result.status if result is not None else 0)
status = 0
if all(i == 1 for i in statuses):
# pass
status = 1
elif any(i == 2 for i in statuses):
# if one outcome fail, the whole application fail
status = 2
# get the next rule to be examined
if status == 1 or status == 2:
# no more outstanding rules
next_rule = None
else:
for (niceness, rule, ), status in izip(prioritized_rules, statuses, ):
if status == 0:
next_rule = rule
break
return status, rule_results, next_rule
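# The return value unpacks as
#   status, rule_results, next_rule = get_auditing_status(entry)
# where status is 0 (pending), 1 (all rules passed) or 2 (at least one rule
# rejected), rule_results maps each AuditingRule to its AuditOutcome (or None
# if not yet audited), and next_rule is the outstanding rule with the lowest
# niceness, or None when nothing remains to audit.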
def check_target_user(target, user):
if user.is_staff or target.user == user:
# Superuser can see EVERYTHING; also owner can see his items
return True, True
if is_expired(target):
return False, False
# cache the rule objects to reduce db impact
# force the queryset to hit db (no point deferring it anyway)
rules = list(AuditingRule.objects.filter(target=target))
if user.profile.role == 0:
# XXX is student able to become auditer?
if is_auditer(rules, user):
return True, True
# check if its class is relevant
if not is_class_applicable(target, user):
return False, False
# visible but not manageable (for ordinary students)
return True, False
else:
# check if the user is an auditer
        if is_auditer(rules, user):
return True, True
return False, False
raise RuntimeError(u'Impossible codepath!')
def get_imm_auditable_rule(entry, user):
target = entry.target
rules = AuditingRule.objects.filter(target=target)
# iterate through the rules
prioritized_rules = sorted((rule.niceness, rule, ) for rule in rules)
#for niceness, rule in prioritized_rules:
# if rule.auditer
# vim:set ai et ts=4 sw=4 sts=4 fenc=utf-8:
| 27.677419 | 78 | 0.640734 |
f94772ffe2d59cb509ab3ebcef5ac84013dfc229 | 3,057 | py | Python | models.py | Ch4nYH/Self-PU | 3df125bcab2c7cc5f0b1160ebc60504bf8e9a73c | [
"MIT"
] | null | null | null | models.py | Ch4nYH/Self-PU | 3df125bcab2c7cc5f0b1160ebc60504bf8e9a73c | [
"MIT"
] | null | null | null | models.py | Ch4nYH/Self-PU | 3df125bcab2c7cc5f0b1160ebc60504bf8e9a73c | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class MultiLayerPerceptron(nn.Module):
def __init__(self, dim):
super(MultiLayerPerceptron, self).__init__()
self.l1 = nn.Linear(dim, 300, bias=False)
self.bn1 = nn.BatchNorm1d(300)
self.l2 = nn.Linear(300, 300, bias=False)
self.bn2 = nn.BatchNorm1d(300)
self.l3 = nn.Linear(300, 300, bias=False)
self.bn3 = nn.BatchNorm1d(300)
self.l4 = nn.Linear(300, 300, bias=False)
self.bn4 = nn.BatchNorm1d(300)
self.l5 = nn.Linear(300, 1)
def forward(self, x):
x = self.l1(x)
x = x.view(-1, 300)
x = self.bn1(x)
x = F.relu(x)
x = self.l2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.l3(x)
x = self.bn3(x)
x = F.relu(x)
x = self.l4(x)
x = self.bn4(x)
x = F.relu(x)
x = self.l5(x)
return x
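# Shape sketch (batch size and input dim are illustrative): for
#   mlp = MultiLayerPerceptron(dim=784)
#   out = mlp(torch.zeros(8, 784))
# out has shape (8, 1), i.e. a single logit per sample.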
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(3, 96, kernel_size = 3, padding=1)
self.bn1 = nn.BatchNorm2d(96)
self.conv2 = nn.Conv2d(96, 96, kernel_size = 3, padding=1)
self.bn2 = nn.BatchNorm2d(96)
self.conv3 = nn.Conv2d(96, 96, kernel_size = 3, stride = 2, padding=1)
self.bn3 = nn.BatchNorm2d(96)
self.conv4 = nn.Conv2d(96, 192, kernel_size = 3, padding=1)
self.bn4 = nn.BatchNorm2d(192)
self.conv5 = nn.Conv2d(192, 192, kernel_size = 3, padding=1)
self.bn5 = nn.BatchNorm2d(192)
self.conv6 = nn.Conv2d(192, 192, kernel_size = 3, stride = 2, padding=1)
self.bn6 = nn.BatchNorm2d(192)
self.conv7 = nn.Conv2d(192, 192, kernel_size = 3, padding=1)
self.bn7 = nn.BatchNorm2d(192)
self.conv8 = nn.Conv2d(192, 192, kernel_size = 1)
self.bn8 = nn.BatchNorm2d(192)
self.conv9 = nn.Conv2d(192, 10, kernel_size = 1)
self.bn9 = nn.BatchNorm2d(10)
self.l1 = nn.Linear(640, 1000)
self.l2 = nn.Linear(1000, 1000)
self.l3 = nn.Linear(1000, 1)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = F.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = F.relu(x)
x = self.conv5(x)
x = self.bn5(x)
x = F.relu(x)
x = self.conv6(x)
x = self.bn6(x)
x = F.relu(x)
x = self.conv7(x)
x = self.bn7(x)
x = F.relu(x)
x = self.conv8(x)
x = self.bn8(x)
x = F.relu(x)
x = self.conv9(x)
x = self.bn9(x)
x = F.relu(x)
x = x.view(-1, 640)
x = self.l1(x)
x = F.relu(x)
x = self.l2(x)
x = F.relu(x)
x = self.l3(x)
return x
if __name__ == '__main__':
x = torch.zeros((1, 3, 32, 32))
model = CNN()
print(model(x)) | 29.970588 | 80 | 0.511286 |
5d7e0ef459a865abd85a33a7b3567a2444540ab3 | 571 | py | Python | h/events.py | gnott/h | 77a0452b8196f7efb97d4a400ce7583062d620e6 | [
"MIT"
] | null | null | null | h/events.py | gnott/h | 77a0452b8196f7efb97d4a400ce7583062d620e6 | [
"MIT"
] | null | null | null | h/events.py | gnott/h | 77a0452b8196f7efb97d4a400ce7583062d620e6 | [
"MIT"
] | null | null | null | __all__ = [
'NewRegistrationEvent',
'RegistrationActivatedEvent',
'PasswordResetEvent',
'ProfileUpdatedEvent',
]
from horus.events import (
NewRegistrationEvent,
RegistrationActivatedEvent,
PasswordResetEvent,
ProfileUpdatedEvent
)
class AnnotationEvent(object):
def __init__(self, request, annotation, action):
self.request = request
self.annotation = annotation
self.action = action
class LoginEvent(object):
def __init__(self, request, user):
self.request = request
self.user = user
| 21.148148 | 52 | 0.688266 |
d54bc79507843ffd4d2ee84c93fd24f155bd2870 | 3,096 | py | Python | python_visual_animation.py | wongself/TSP | ced936ade3bf1745b94350028535913658cd7391 | [
"MIT"
] | null | null | null | python_visual_animation.py | wongself/TSP | ced936ade3bf1745b94350028535913658cd7391 | [
"MIT"
] | null | null | null | python_visual_animation.py | wongself/TSP | ced936ade3bf1745b94350028535913658cd7391 | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
"""
python_visual_animation.py by xianhu
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Only needed on older matplotlib versions to register the "3d" projection
# used in three_dimension_scatter(); harmless otherwise.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401
# Work around garbled rendering of the minus sign with non-ASCII (Chinese) fonts
matplotlib.rcParams["axes.unicode_minus"] = False
def simple_plot():
"""
simple plot
"""
    # Create the figure
    plt.figure(figsize=(8, 6), dpi=80)
    # Turn on interactive mode
    plt.ion()
    # Animation loop
    for index in range(100):
        # Clear the previous frame
        plt.cla()
        # Set the title and grid
        plt.title("Jus")
        plt.grid(True)
        # Generate test data
        x = np.linspace(-np.pi + 0.1 * index,
                        np.pi + 0.1 * index,
                        256,
                        endpoint=True)
        y_cos, y_sin = np.cos(x), np.sin(x)
        # Configure the X axis
        plt.xlabel("X")
        plt.xlim(-4 + 0.1 * index, 4 + 0.1 * index)
        plt.xticks(
            np.linspace(-4 + 0.1 * index, 4 + 0.1 * index, 9, endpoint=True))
        # Configure the Y axis
        plt.ylabel("Y")
        plt.ylim(-1.0, 1.0)
        plt.yticks(np.linspace(-1, 1, 9, endpoint=True))
        # Plot the two curves
        plt.plot(x, y_cos, "b--", linewidth=2.0, label="cos")
        plt.plot(x, y_sin, "g-", linewidth=2.0, label="sin")
        # Place the legend; loc can be [upper, lower, left, right, center]
        plt.legend(loc="upper left", shadow=True)
        # Pause so the frame is rendered
        plt.pause(0.05)
    # Turn off interactive mode
    plt.ioff()
    # Show the final figure
plt.show()
return
simple_plot()
def scatter_plot():
"""
scatter plot
"""
    # Turn on interactive mode
    plt.ion()
    # Animation loop
    for index in range(50):
        # Clearing of the previous frame is disabled so points accumulate
        # plt.cla()
        # Set the title and grid
        plt.title("Dynamic scatter plot")
        plt.grid(True)
        # Generate test data
        point_count = 5
        x_index = np.random.random(point_count)
        y_index = np.random.random(point_count)
        # Per-point color and marker size
        color_list = np.random.random(point_count)
        scale_list = np.random.random(point_count) * 100
        # Draw the scatter points
        plt.scatter(x_index, y_index, s=scale_list, c=color_list, marker="o")
        # Pause so the frame is rendered
        plt.pause(0.2)
    # Turn off interactive mode
    plt.ioff()
    # Show the final figure
plt.show()
return
# scatter_plot()
def three_dimension_scatter():
"""
3d scatter plot
"""
    # Create the figure
    fig = plt.figure()
    # Turn on interactive mode
    plt.ion()
    # Animation loop
    for index in range(50):
        # Clear the previous frame
        fig.clf()
        # Set the figure title
        fig.suptitle("3D dynamic scatter plot")
        # Generate test data
        point_count = 100
        x = np.random.random(point_count)
        y = np.random.random(point_count)
        z = np.random.random(point_count)
        color = np.random.random(point_count)
        scale = np.random.random(point_count) * 100
        # Create a 3D subplot
        ax = fig.add_subplot(111, projection="3d")
        # Draw the 3D scatter points
        ax.scatter(x, y, z, s=scale, c=color, marker=".")
        # Label the axes
        ax.set_xlabel("X Label")
        ax.set_ylabel("Y Label")
        ax.set_zlabel("Z Label")
        # Set the axis ranges
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        ax.set_zlim(0, 1)
        # Pause so the frame is rendered
        plt.pause(0.2)
    # Turn off interactive mode
    plt.ioff()
    # Show the final figure
plt.show()
return
# three_dimension_scatter()
| 18.428571 | 77 | 0.510336 |
9e052c6b434f00e47bb8a691a1dd1a48a0cdbd7a | 51,268 | py | Python | virtual/lib/python3.6/site-packages/sqlalchemy/sql/type_api.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | 27 | 2019-10-28T05:03:18.000Z | 2021-06-09T00:16:22.000Z | virtual/lib/python3.6/site-packages/sqlalchemy/sql/type_api.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | 12 | 2020-02-03T11:43:02.000Z | 2020-03-02T14:21:10.000Z | virtual/lib/python3.6/site-packages/sqlalchemy/sql/type_api.py | kenmutuma001/Blog | 6b19a77b71694bbe9f5e84207de46c68f87ebc5e | [
"Unlicense"
] | 21 | 2017-11-13T13:23:27.000Z | 2019-10-07T02:00:52.000Z | # sql/types_api.py
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from . import operators
from .base import SchemaEventTarget
from .visitors import Visitable
from .visitors import VisitableType
from .. import exc
from .. import util
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see
:ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
__slots__ = "expr", "type"
default_comparator = None
def __init__(self, expr):
self.expr = expr
self.type = expr.type
@util.dependencies("sqlalchemy.sql.default_comparator")
def operate(self, default_comparator, op, *other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, *(other + o[1:]), **kwargs)
@util.dependencies("sqlalchemy.sql.default_comparator")
def reverse_operate(self, default_comparator, op, other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, other, reverse=True, *o[1:], **kwargs)
def _adapt_expression(self, op, other_comparator):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
This method determines the type of a resulting binary expression
given two source types and an operator. For example, two
:class:`.Column` objects, both of the type :class:`.Integer`, will
produce a :class:`.BinaryExpression` that also has the type
:class:`.Integer` when compared via the addition (``+``) operator.
However, using the addition operator with an :class:`.Integer`
and a :class:`.Date` object will produce a :class:`.Date`, assuming
"days delta" behavior by the database (in reality, most databases
other than PostgreSQL don't accept this particular operation).
The method returns a tuple of the form <operator>, <type>.
The resulting operator and type will be those applied to the
resulting :class:`.BinaryExpression` as the final operator and the
right-hand side of the expression.
Note that only a subset of operators make usage of
:meth:`._adapt_expression`,
including math operators and user-defined operators, but not
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr,)
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
"""
should_evaluate_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows types which have special
behavior for Python None, such as a JSON type, to indicate that
they'd like to handle the None value explicitly.
To set this flag on an existing type, use the
:meth:`.TypeEngine.evaluates_none` method.
.. seealso::
:meth:`.TypeEngine.evaluates_none`
.. versionadded:: 1.1
"""
def evaluates_none(self):
"""Return a copy of this type which has the :attr:`.should_evaluate_none`
flag set to True.
E.g.::
Table(
'some_table', metadata,
Column(
String(50).evaluates_none(),
nullable=True,
server_default='no value')
)
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows for types which have
special behavior associated with the Python None value to indicate
that the value doesn't necessarily translate into SQL NULL; a
prime example of this is a JSON type which may wish to persist the
JSON value ``'null'``.
In all cases, the actual NULL SQL value can be always be
persisted in any column by using
the :obj:`~.expression.null` SQL construct in an INSERT statement
or associated with an ORM-mapped attribute.
.. note::
The "evaluates none" flag does **not** apply to a value
of ``None`` passed to :paramref:`.Column.default` or
:paramref:`.Column.server_default`; in these cases, ``None``
still means "no default".
.. versionadded:: 1.1
.. seealso::
:ref:`session_forcing_null` - in the ORM documentation
:paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON
interaction with this flag.
:attr:`.TypeEngine.should_evaluate_none` - class-level flag
"""
typ = self.copy()
typ.should_evaluate_none = True
return typ
def copy(self, **kw):
return self.adapt(self.__class__)
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
This function is currently not implemented for SQLAlchemy
types, and for all built in types will return ``None``. However,
it can be implemented by a user-defined type
where it can be consumed by schema comparison tools such as
Alembic autogenerate.
A future release of SQLAlchemy will potentially implement this method
for builtin types as well.
The function should return True if this type is equivalent to the
given type; the type is typically reflected from the database
so should be database specific. The dialect in use is also
passed. It can also return False to assert that the type is
not equivalent.
:param dialect: a :class:`.Dialect` that is involved in the comparison.
:param conn_type: the type object reflected from the backend.
.. versionadded:: 1.0.3
"""
return None
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
.. seealso::
:ref:`types_sql_value_processing`
"""
return None
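    # Illustrative sketch (not part of the original source; the function name is
    # an assumption): a geometry-style type might post-process SELECTed values,
    # e.g.
    #   def column_expression(self, colexpr):
    #       return func.ST_AsText(colexpr, type_=self)
    # with ``func`` imported from ``sqlalchemy.sql``.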
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.column_expression.__code__
is not TypeEngine.column_expression.__code__
)
    def bind_expression(self, bindvalue):
        """Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
.. seealso::
:ref:`types_sql_value_processing`
"""
return None
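    # Illustrative sketch mirroring the column_expression note above (function
    # name is an assumption): the same type might wrap incoming bound values,
    # e.g.
    #   def bind_expression(self, bindvalue):
    #       return func.ST_GeomFromText(bindvalue, type_=self)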
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.bind_expression.__code__
is not TypeEngine.bind_expression.__code__
)
@staticmethod
def _to_instance(cls_or_self):
return to_instance(cls_or_self)
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, for those types which enforce a return type,
or are known across the board to do such for all common
DBAPIs (like ``int`` for example), will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
r"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`.Variant.with_variant`
that can be called repeatedly.
:param type\_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
return Variant(self, {dialect_name: to_instance(type_)})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]["impl"]
except KeyError:
return self._dialect_info(dialect)["impl"]
def _unwrapped_dialect_impl(self, dialect):
"""Return the 'unwrapped' dialect impl for this type.
For a type that applies wrapping logic (e.g. TypeDecorator), give
us the real, actual dialect-level type that is used.
This is used by TypeDecorator itself as well at least one case where
dialects need to check that a particular specific dialect-level
type is in use, within the :meth:`.DefaultDialect.set_input_sizes`
method.
"""
return self.dialect_impl(dialect)
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]["literal"]
except KeyError:
d = self._dialect_info(dialect)
d["literal"] = lp = d["impl"].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]["bind"]
except KeyError:
d = self._dialect_info(dialect)
d["bind"] = bp = d["impl"].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d["impl"].result_processor(dialect, coltype)
return rp
def _cached_custom_processor(self, dialect, key, fn):
try:
return dialect._type_memos[self][key]
except KeyError:
d = self._dialect_info(dialect)
impl = d["impl"]
d[key] = result = fn(impl)
return result
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {"impl": impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
        needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _resolve_value_to_type(value)
if (
_coerced_type is NULLTYPE
or _coerced_type._type_affinity is self._type_affinity
):
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).encode( # noqa
"ascii", "backslashreplace"
) # noqa
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self, **kw):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
of the type as being compiled, such as a :class:`.Column` or
:func:`.cast` construct. This keyword is only sent if the method
accepts keyword arguments (e.g. ``**kw``) in its argument signature;
introspection is used to check for this in order to support legacy
forms of this function.
.. versionadded:: 1.0.0 the owning expression is passed to
the ``get_col_spec()`` method via the keyword argument
``type_expression``, if it receives ``**kw`` in its signature.
"""
__visit_name__ = "user_defined"
ensure_kwarg = "get_col_spec"
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, "adapt_operator"):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return super(
UserDefinedType.Comparator, self
)._adapt_expression(op, other_comparator)
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
"""
return self
class Emulated(object):
"""Mixin for base types that emulate the behavior of a DB-native type.
An :class:`.Emulated` type will use an available database type
in conjunction with Python-side routines and/or database constraints
in order to approximate the behavior of a database type that is provided
natively by some backends. When a native-providing backend is in
use, the native version of the type is used. This native version
should include the :class:`.NativeForEmulated` mixin to allow it to be
distinguished from :class:`.Emulated`.
Current examples of :class:`.Emulated` are: :class:`.Interval`,
:class:`.Enum`, :class:`.Boolean`.
.. versionadded:: 1.2.0b3
"""
def adapt_to_emulated(self, impltype, **kw):
"""Given an impl class, adapt this type to the impl assuming "emulated".
The impl should also be an "emulated" version of this type,
most likely the same class as this type itself.
e.g.: sqltypes.Enum adapts to the Enum class.
"""
return super(Emulated, self).adapt(impltype, **kw)
def adapt(self, impltype, **kw):
if hasattr(impltype, "adapt_emulated_to_native"):
if self.native:
# native support requested, dialect gave us a native
# implementor, pass control over to it
return impltype.adapt_emulated_to_native(self, **kw)
else:
# impltype adapts to native, and we are not native,
# so reject the impltype in favor of "us"
impltype = self.__class__
if issubclass(impltype, self.__class__):
return self.adapt_to_emulated(impltype, **kw)
else:
return super(Emulated, self).adapt(impltype, **kw)
class NativeForEmulated(object):
"""Indicates DB-native types supported by an :class:`.Emulated` type.
.. versionadded:: 1.2.0b3
"""
@classmethod
def adapt_emulated_to_native(cls, impl, **kw):
"""Given an impl, adapt this type's class to the impl assuming "native".
The impl will be an :class:`.Emulated` class but not a
:class:`.NativeForEmulated`.
e.g.: postgresql.ENUM produces a type given an Enum instance.
"""
return cls(**kw)
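# As a concrete illustration of the Emulated / NativeForEmulated pair:
# sqlalchemy.Enum is an Emulated type and postgresql.ENUM is its
# NativeForEmulated counterpart, so Enum(native_enum=True) on PostgreSQL hands
# adapt() off to ENUM.adapt_emulated_to_native(), while native_enum=False keeps
# the emulated VARCHAR-plus-CHECK-constraint form.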
class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
.. warning::
Note that the **behavior of coerce_compared_value is not inherited
by default from that of the base type**.
If the :class:`.TypeDecorator` is augmenting a
type that requires special logic for certain types of operators,
this method **must** be overridden. A key example is when decorating
the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
the default rules of :meth:`.TypeEngine.coerce_compared_value` should
be used in order to deal with operators like index operations::
class MyJsonType(TypeDecorator):
impl = postgresql.JSON
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
Without the above step, index operations such as ``mycol['foo']``
will cause the index value ``'foo'`` to be JSON encoded.
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, "impl"):
raise AssertionError(
"TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated"
)
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
coerce_to_is_types = (util.NoneType,)
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
``IS NOT`` in conjunction with ``!=``.
For most SQLAlchemy types, this includes ``NoneType``, as well as
``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as typedecorator implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
"""
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def operate(self, op, *other, **kwargs):
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs
)
def reverse_operate(self, op, other, **kwargs):
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs
)
@property
def comparator_factory(self):
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
return self.impl.comparator_factory
else:
return type(
"TDComparator",
(TypeDecorator.Comparator, self.impl.comparator_factory),
{},
)
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self._unwrapped_dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError(
"Type object %s does not properly "
"implement the copy() method, it must "
"return an object of type %s" % (self, self.__class__)
)
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def _set_parent(self, column):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent(column)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent_with_dispatch(parent)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
Makes usage of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if not isinstance(adapted, type(self)):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def _unwrapped_dialect_impl(self, dialect):
"""Return the 'unwrapped' dialect impl for this type.
For a type that applies wrapping logic (e.g. TypeDecorator), give
us the real, actual dialect-level type that is used.
This is used by TypeDecorator itself as well at least one case where
dialects need to check that a particular specific dialect-level
type is in use, within the :meth:`.DefaultDialect.set_input_sizes`
method.
"""
return self.load_dialect_impl(dialect).dialect_impl(dialect)
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
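    # Editorial sketch showing the two hooks implemented symmetrically; the
    # JSONEncodedText name and the json module usage are illustrative
    # assumptions, not part of this module.
    #
    #     class JSONEncodedText(TypeDecorator):
    #         impl = Text
    #
    #         def process_bind_param(self, value, dialect):
    #             return json.dumps(value) if value is not None else None
    #
    #         def process_result_value(self, value, dialect):
    #             return json.loads(value) if value is not None else None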
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return (
self.__class__.process_bind_param.__code__
is not TypeDecorator.process_bind_param.__code__
)
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return (
self.__class__.process_literal_param.__code__
is not TypeDecorator.process_literal_param.__code__
)
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override
:meth:`.TypeDecorator.process_literal_param` instead of this method
directly.
By default, this method makes use of
:meth:`.TypeDecorator.process_bind_param` if that method is
implemented, where :meth:`.TypeDecorator.process_literal_param` is
not. The rationale here is that :class:`.TypeDecorator` typically
deals with Python conversions of data that are above the layer of
database presentation. With the value converted by
:meth:`.TypeDecorator.process_bind_param`, the underlying type will
then handle whether it needs to be presented to the DBAPI as a bound
parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator since it isn't doing DB-level
# handling, the handling here won't be different for bound vs.
# literals.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return (
self.__class__.process_result_value.__code__
is not TypeDecorator.process_result_value.__code__
)
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
:param coltype: A SQLAlchemy data type
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect, coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
@util.memoized_property
def _has_bind_expression(self):
return (
self.__class__.bind_expression.__code__
is not TypeDecorator.bind_expression.__code__
) or self.impl._has_bind_expression
def bind_expression(self, bindparam):
return self.impl.bind_expression(bindparam)
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return (
self.__class__.column_expression.__code__
is not TypeDecorator.column_expression.__code__
) or self.impl._has_column_expression
def column_expression(self, column):
return self.impl.column_expression(column)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
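    # Editorial sketch of an override (illustrative; Integer/String are assumed
    # to be imported from this package): a JSON-like decorator could choose the
    # comparison type based on the Python value.
    #
    #     def coerce_compared_value(self, op, value):
    #         if isinstance(value, int):
    #             return Integer()
    #         elif isinstance(value, str):
    #             return String()
    #         return self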
def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def coerce_compared_value(self, operator, value):
result = self.impl.coerce_compared_value(operator, value)
if result is self.impl:
return self
else:
return result
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(parent)
def with_variant(self, type_, dialect_name):
r"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type\_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name
)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
@property
def comparator_factory(self):
"""express comparison behavior in terms of the base type"""
return self.impl.comparator_factory
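# Editorial sketch of typical Variant construction through with_variant()
# (the mysql dialect import is an assumption for illustration only):
#
#     from sqlalchemy.dialects import mysql
#     string_type = String(100).with_variant(
#         mysql.VARCHAR(100, charset="utf8"), "mysql"
#     )
#     # MySQL renders the variant type; every other dialect uses String(100).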
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if issubclass(typeobj.__class__, impltype):
return typeobj
return typeobj.adapt(impltype)
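# Editorial examples (illustrative only; "dialect" stands for any dialect
# object exposing a colspecs mapping):
#
#     to_instance(String)                        # -> String()
#     adapt_type(String(50), dialect.colspecs)   # -> the dialect-level string type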
| 35.115068 | 81 | 0.64276 |
b889943a8469eef3ea69528e533d7f6987d22228 | 837 | py | Python | tests/nlu_core_tests/component_tests/classifier_tests/snips.py | UPbook-innovations/nlu | 2ae02ce7b6ca163f47271e98b71de109d38adefe | ["Apache-2.0"] | null | null | null | tests/nlu_core_tests/component_tests/classifier_tests/snips.py | UPbook-innovations/nlu | 2ae02ce7b6ca163f47271e98b71de109d38adefe | ["Apache-2.0"] | 2 | 2021-09-28T05:55:05.000Z | 2022-02-26T11:16:21.000Z | tests/nlu_core_tests/component_tests/classifier_tests/snips.py | atdavidpark/nlu | 619d07299e993323d83086c86506db71e2a139a9 | ["Apache-2.0"] | 1 | 2021-09-13T10:06:20.000Z | 2021-09-13T10:06:20.000Z |
import unittest
from nlu import *
class TestCyber(unittest.TestCase):
def test_snips_classifer_model(self):
pipe = nlu.load('en.classify.snips',verbose=True)
df = pipe.predict(['I love pancaces. I hate Mondays', 'I love Fridays'])
print(df.columns)
for c in df.columns:print(c,df[c])
def test_snips_ner_model(self):
pipe = nlu.load('en.ner.snips',verbose=True)
df = pipe.predict(['I love pancaces. I hate Mondays', 'I love Fridays'])
print(df.columns)
for c in df.columns:print(c,df[c])
def test_quick(self):
pipe = nlu.load('bn.ner',verbose=True)
df = pipe.predict(['I love pancaces. I hate Mondays', 'I love Fridays'])
print(df.columns)
for c in df.columns:print(c,df[c])
if __name__ == '__main__':
unittest.main()
| 28.862069 | 80 | 0.626045 |
b87b645fa4a74192289c6b89cb6f741250567beb | 2,208 | py | Python | benchmark/startPyquil1234.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | benchmark/startPyquil1234.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | benchmark/startPyquil1234.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=5
# total number=49
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(0) # number=43
prog += CZ(4,0) # number=44
prog += H(0) # number=45
prog += Z(4) # number=33
prog += H(0) # number=37
prog += CZ(4,0) # number=38
prog += H(0) # number=39
prog += H(0) # number=1
prog += RX(-1.0430087609918113,4) # number=36
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += CNOT(1,0) # number=40
prog += X(0) # number=41
prog += H(0) # number=46
prog += CZ(1,0) # number=47
prog += H(0) # number=48
prog += X(1) # number=10
prog += RX(-0.06597344572538572,3) # number=27
prog += CNOT(0,2) # number=22
prog += X(2) # number=23
prog += H(2) # number=28
prog += CZ(0,2) # number=29
prog += H(2) # number=30
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(4) # number=35
prog += H(0) # number=17
prog += RX(2.4912829742967055,2) # number=26
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(2) # number=25
prog += H(3) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1234.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 25.37931 | 64 | 0.547554 |
c2e423368f4ac81870c10de4e87390b8a7fec7a9 | 1,565 | py | Python | h2o-bindings/bin/custom/python/gen_deeplearning.py | 13927729580/h2o-3 | 850ecb214f01340edb62c45242c76212f4b60381 | ["Apache-2.0"] | 1 | 2019-09-15T18:50:36.000Z | 2019-09-15T18:50:36.000Z | h2o-bindings/bin/custom/python/gen_deeplearning.py | 13927729580/h2o-3 | 850ecb214f01340edb62c45242c76212f4b60381 | ["Apache-2.0"] | null | null | null | h2o-bindings/bin/custom/python/gen_deeplearning.py | 13927729580/h2o-3 | 850ecb214f01340edb62c45242c76212f4b60381 | ["Apache-2.0"] | null | null | null |
def module_extensions():
class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
"""
:examples:
>>> import h2o as ml
>>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
>>> ml.init()
>>> rows = [[1,2,3,4,0]*50, [2,1,2,4,1]*50, [2,1,4,2,1]*50, [0,1,2,34,1]*50, [2,3,4,1,0]*50]
>>> fr = ml.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2OAutoEncoderEstimator()
>>> model.train(x=range(4), training_frame=fr)
"""
def __init__(self, **kwargs):
super(H2OAutoEncoderEstimator, self).__init__(**kwargs)
self._parms['autoencoder'] = True
extensions = dict(
__module__=module_extensions
)
overrides = dict(
initial_biases=dict(
setter="""
assert_is_type({pname}, None, [H2OFrame, None])
self._parms["{sname}"] = {pname}
"""
),
initial_weights=dict(
setter="""
assert_is_type({pname}, None, [H2OFrame, None])
self._parms["{sname}"] = {pname}
"""
),
)
doc = dict(
__class__="""
Build a Deep Neural Network model using CPUs
Builds a feed-forward multilayer artificial neural network on an H2OFrame
"""
)
examples = dict(
__class__="""
>>> import h2o
>>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
>>> h2o.connect()
>>> rows = [[1,2,3,4,0], [2,1,2,4,1], [2,1,4,2,1], [0,1,2,34,1], [2,3,4,1,0]] * 50
>>> fr = h2o.H2OFrame(rows)
>>> fr[4] = fr[4].asfactor()
>>> model = H2ODeepLearningEstimator()
>>> model.train(x=range(4), y=4, training_frame=fr)
"""
)
| 26.525424 | 100 | 0.597444 |
7030c29b25258ba895765d926aa4bf32b359cb46 | 1,122 | py | Python | tests/test_mongo_controller_index_unique.py | lucafaggianelli/layabase | 90733c6b9efd56051dfce5c3d89bd4e657ce7b3f | ["MIT"] | 3 | 2019-12-02T23:29:44.000Z | 2019-12-31T00:55:01.000Z | tests/test_mongo_controller_index_unique.py | lucafaggianelli/layabase | 90733c6b9efd56051dfce5c3d89bd4e657ce7b3f | ["MIT"] | 29 | 2019-12-02T16:12:45.000Z | 2022-02-17T16:01:55.000Z | tests/test_mongo_controller_index_unique.py | lucafaggianelli/layabase | 90733c6b9efd56051dfce5c3d89bd4e657ce7b3f | ["MIT"] | 3 | 2020-01-02T10:58:47.000Z | 2022-02-17T10:55:18.000Z |
import pytest
import layabase
import layabase.mongo
@pytest.fixture
def controller():
class TestCollection:
__collection_name__ = "test"
id = layabase.mongo.Column(index_type=layabase.mongo.IndexType.Unique)
id2 = layabase.mongo.Column(index_type=layabase.mongo.IndexType.Unique)
controller = layabase.CRUDController(TestCollection)
layabase.load("mongomock", [controller])
return controller
def test_put_many_without_primary_key_and_unique_index_update(
controller: layabase.CRUDController,
):
assert controller.post_many(
[{"id": "test1", "id2": "test1"}, {"id": "test1", "id2": "test2"}]
) == [{"id": "test1", "id2": "test1"}, {"id": "test1", "id2": "test2"}]
# It should never be declared without a PK in this case but as there is no PK, the first document is updated.
with pytest.raises(layabase.ValidationFailed) as exception_info:
controller.put_many([{"id2": "test2"}])
assert exception_info.value.errors == {"": ["One document already exists."]}
assert exception_info.value.received_data == [{"id": None, "id2": "test2"}]
| 36.193548 | 113 | 0.690731 |
d47ad07421f18e7dc9d687cb2c02f3a12194be08 | 1,972 | py | Python | yt_dlp/extractor/mojvideo.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | ["Unlicense"] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/mojvideo.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | ["Unlicense"] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/mojvideo.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | ["Unlicense"] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z |
from .common import InfoExtractor
from ..utils import (
ExtractorError,
parse_duration,
)
class MojvideoIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?mojvideo\.com/video-(?P<display_id>[^/]+)/(?P<id>[a-f0-9]+)'
_TEST = {
'url': 'http://www.mojvideo.com/video-v-avtu-pred-mano-rdecelaska-alfi-nipic/3d1ed4497707730b2906',
'md5': 'f7fd662cc8ce2be107b0d4f2c0483ae7',
'info_dict': {
'id': '3d1ed4497707730b2906',
'display_id': 'v-avtu-pred-mano-rdecelaska-alfi-nipic',
'ext': 'mp4',
'title': 'V avtu pred mano rdečelaska - Alfi Nipič',
'thumbnail': r're:^http://.*\.jpg$',
'duration': 242,
}
}
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
# XML is malformed
playerapi = self._download_webpage(
'http://www.mojvideo.com/playerapi.php?v=%s&t=1' % video_id, display_id)
if '<error>true</error>' in playerapi:
error_desc = self._html_search_regex(
r'<errordesc>([^<]*)</errordesc>', playerapi, 'error description', fatal=False)
raise ExtractorError('%s said: %s' % (self.IE_NAME, error_desc), expected=True)
title = self._html_extract_title(playerapi)
video_url = self._html_search_regex(
r'<file>([^<]+)</file>', playerapi, 'video URL')
thumbnail = self._html_search_regex(
r'<preview>([^<]+)</preview>', playerapi, 'thumbnail', fatal=False)
duration = parse_duration(self._html_search_regex(
r'<duration>([^<]+)</duration>', playerapi, 'duration', fatal=False))
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
}
| 37.207547 | 107 | 0.573022 |
11ed06b5fa01f9a6146e1d81ea4334d8b8b18714 | 1,720 | py | Python | src/app/nginx/controller.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | ["MIT"] | 4 | 2016-09-24T19:46:12.000Z | 2017-07-08T02:17:06.000Z | src/app/nginx/controller.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | ["MIT"] | null | null | null | src/app/nginx/controller.py | colinnewell/Adventure-Insecure | 46717dd14d88887559bb3a392c67b534c294edaa | ["MIT"] | null | null | null |
from flask import Blueprint, render_template, \
flash, session, redirect, url_for, current_app
from app.auth.utils import admin_required
from app.nginx.forms import ConfirmClearForm
from app.auth.models import User
import pexpect
import time
import logging
nginx = Blueprint('nginx', __name__, url_prefix='/nginx')
@nginx.route('/clear_cache', methods=['GET', 'POST'])
@admin_required
def clear_cache():
form = ConfirmClearForm()
if form.validate_on_submit():
# FIXME: find their username from ldap
# and their password from the session.
# actually we could store the username
# in the session.
password = session['password']
user = User.query.filter_by(id=session['user_id']).first()
ldap = current_app.ldap
uinfo = ldap.user_info_by_email(user.email, ['uid'])
if uinfo:
username = uinfo['attributes']['uid'][0]
command = "ssh -o StrictHostKeyChecking=no %s@ssh 'sudo find /var/cache/nginx/ -type f -exec rm {} \;'" % username
logging.debug('Running: ' + command)
try:
child = pexpect.spawn(command)
child.expect('password:')
time.sleep(0.1)
child.sendline(password)
time.sleep(0.5)
child.expect(pexpect.EOF)
flash("Cache cleared", "message")
return redirect(url_for('menus.index'))
except Exception as e:
logging.warning(e)
flash("Failed to clear cache", "error")
else:
flash('Only LDAP users can use this feature', 'error')
return render_template('nginx/clear_cache.html', form=form)
| 36.595745 | 126 | 0.609302 |
73be987116ea6dd87224362579d45fc9e25722ea | 1,773 | py | Python | matcher/api/serializers.py | rzetelskik/ad_hoc | c46d55bd029cbcf5ad40f1fd9f9f40a0f4a60851 | ["MIT"] | null | null | null | matcher/api/serializers.py | rzetelskik/ad_hoc | c46d55bd029cbcf5ad40f1fd9f9f40a0f4a60851 | ["MIT"] | null | null | null | matcher/api/serializers.py | rzetelskik/ad_hoc | c46d55bd029cbcf5ad40f1fd9f9f40a0f4a60851 | ["MIT"] | null | null | null |
from rest_framework import serializers
from matcher.models import Answer, Match
from account.models import CustomUser
class AnswerSerializer(serializers.ModelSerializer):
recipient = serializers.SlugRelatedField(slug_field='username', queryset=CustomUser.objects.all(), many=False,
required=True)
class Meta:
model = Answer
fields = ['recipient', 'agreed']
def create(self, validated_data):
user = self.context['request'].user
recipient = validated_data['recipient']
agreed = validated_data['agreed']
answer = Answer.objects.create(sender_id=user.pk, recipient_id=recipient.pk, agreed=agreed)
return answer
class CurrentMatchSerializer(serializers.Serializer):
match_id = serializers.ReadOnlyField()
first_name = serializers.CharField(max_length=30)
distance = serializers.DecimalField(max_digits=5, decimal_places=2)
match_timestamp = serializers.DateTimeField()
common_tags = serializers.ListField(
child=serializers.CharField()
)
class TerminatedMatchSerializer(serializers.ModelSerializer):
match_id = serializers.SerializerMethodField()
first_name = serializers.SerializerMethodField()
common_tags = serializers.SerializerMethodField()
def get_match_id(self, obj):
return obj.pk
def get_first_name(self, obj):
user = self.context['request'].user
matched_user = obj.user1 if obj.user2 == user else obj.user2
return matched_user.first_name
def get_common_tags(self, obj):
return [tag.name for tag in obj.common_tags.all()]
class Meta:
model = Match
fields = ['match_id', 'first_name', 'time_start', 'time_end', 'common_tags']
| 32.833333 | 114 | 0.698252 |
c6f5acb8f88f1d1aafdd6a69ba09b476dc378cdf | 5,156 | py | Python | test/functional/test_framework/netutil.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | ["MIT"] | null | null | null | test/functional/test_framework/netutil.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | ["MIT"] | null | null | null | test/functional/test_framework/netutil.py | Deimoscoin/deimos | c03a65c72ffe6fadb840bc87e6fd6b4e012def08 | ["MIT"] | 1 | 2018-06-12T00:50:01.000Z | 2018-06-12T00:50:01.000Z |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DeimOS Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import struct
import array
import os
from binascii import unhexlify, hexlify
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = unhexlify(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
import fcntl # Linux only, so only import when required
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
    namestr = names.tobytes()  # array.tostring() was removed in Python 3.9; tobytes() is the equivalent call
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return hexlify(bytearray(addr)).decode('ascii')
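# Editorial examples (illustrative, not part of the original framework):
#
#     addr_to_hex('127.0.0.1')  # -> '7f000001'
#     addr_to_hex('::1')        # -> '00000000000000000000000000000001'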
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
import socket
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 0))
except socket.error:
have_ipv6 = False
return have_ipv6
| 32.427673 | 111 | 0.602599 |
3c9deec45885145bc4f483a6447a651998248fc4 | 5,922 | py | Python | fissix/pgen2/driver.py | MingMingShangTian/paddle1to2 | 4a711ea310242d0a18e692ddebf3e02f90457b8f | ["Apache-2.0"] | 11 | 2021-07-14T12:51:58.000Z | 2022-03-10T09:05:32.000Z | fissix/pgen2/driver.py | MingMingShangTian/paddle1to2 | 4a711ea310242d0a18e692ddebf3e02f90457b8f | ["Apache-2.0"] | 3 | 2020-09-22T07:00:25.000Z | 2020-11-14T03:36:42.000Z | fissix/pgen2/driver.py | MingMingShangTian/paddle1to2 | 4a711ea310242d0a18e692ddebf3e02f90457b8f | ["Apache-2.0"] | 5 | 2021-01-06T05:58:46.000Z | 2022-02-14T01:42:59.000Z |
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import io
import os
import logging
import pkgutil
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = ""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug(
"%s %r (prefix=%r)", token.tok_name[type], value, prefix
)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input", type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
with io.open(filename, "r", encoding=encoding) as stream:
return self.parse_stream(stream, debug)
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(io.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
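    # Editorial sketch of typical Driver usage; the pygram/pytree names are
    # assumptions based on the wider fissix package, not this module:
    #
    #     from fissix import pygram, pytree
    #     from fissix.pgen2 import driver
    #     d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    #     tree = d.parse_string("x = 1\n")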
def _generate_pickle_name(gt):
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
gp = _generate_pickle_name(gt) if gp is None else gp
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except OSError as e:
logger.info("Writing failed: %s", e)
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def load_packaged_grammar(package, grammar_source):
"""Normally, loads a pickled grammar by doing
pkgutil.get_data(package, pickled_grammar)
where *pickled_grammar* is computed from *grammar_source* by adding the
Python version and using a ``.pickle`` extension.
However, if *grammar_source* is an extant file, load_grammar(grammar_source)
is called instead. This facilitates using a packaged grammar file when needed
but preserves load_grammar's automatic regeneration behavior when possible.
"""
if os.path.isfile(grammar_source):
return load_grammar(grammar_source, save=False, force=True)
pickled_name = _generate_pickle_name(os.path.basename(grammar_source))
data = pkgutil.get_data(package, pickled_name)
g = grammar.Grammar()
g.loads(data)
return g
def main(*args):
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""
if not args:
args = sys.argv[1:]
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s")
for gt in args:
load_grammar(gt, save=False, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main()))
| 33.647727 | 84 | 0.60233 |
3d68ba20eee43f690de828576d983d0fbada095d | 24,158 | py | Python | electrumx/server/peers.py | johiruddinsultan/electrumx | 493d653406a6f67c7a991f6c48c8ca5f62d735e9 | ["MIT"] | null | null | null | electrumx/server/peers.py | johiruddinsultan/electrumx | 493d653406a6f67c7a991f6c48c8ca5f62d735e9 | ["MIT"] | null | null | null | electrumx/server/peers.py | johiruddinsultan/electrumx | 493d653406a6f67c7a991f6c48c8ca5f62d735e9 | ["MIT"] | null | null | null |
# Copyright (c) 2017-2018, JUS
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Peer management.'''
import asyncio
import random
import socket
import ssl
import time
from collections import Counter, defaultdict
from ipaddress import IPv4Address, IPv6Address
from typing import TYPE_CHECKING, Type
import aiohttp
from aiorpcx import (Event, Notification, RPCError, RPCSession, SOCKSError,
SOCKSProxy, TaskGroup, TaskTimeout, connect_rs,
handler_invocation, ignore_after, sleep)
from aiorpcx.jsonrpc import CodeMessageError
from electrumx.lib.peer import Peer
from electrumx.lib.util import class_logger, json_deserialize
if TYPE_CHECKING:
from electrumx.server.env import Env
from electrumx.server.db import DB
PEER_GOOD, PEER_STALE, PEER_NEVER, PEER_BAD = range(4)
STATUS_DESCS = ('good', 'stale', 'never', 'bad')
STALE_SECS = 3 * 3600
WAKEUP_SECS = 300
PEER_ADD_PAUSE = 600
class BadPeerError(Exception):
pass
def assert_good(message, result, instance):
if not isinstance(result, instance):
raise BadPeerError(f'{message} returned bad result type '
f'{type(result).__name__}')
class PeerSession(RPCSession):
'''An outgoing session to a peer.'''
async def handle_request(self, request):
# We subscribe so might be unlucky enough to get a notification...
if (isinstance(request, Notification) and
request.method == 'blockchain.headers.subscribe'):
pass
else:
await handler_invocation(None, request) # Raises
class PeerManager:
'''Looks after the DB of peer network servers.
Attempts to maintain a connection with up to 8 peers.
Issues a 'peers.subscribe' RPC to them and tells them our data.
'''
def __init__(self, env: 'Env', db: 'DB'):
self.logger = class_logger(__name__, self.__class__.__name__)
# Initialise the Peer class
Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
self.env = env
self.db = db
# Our reported clearnet and Tor Peers, if any
sclass = env.coin.SESSIONCLS
self.myselves = [Peer(str(service.host), sclass.server_features(env), 'env')
for service in env.report_services]
self.server_version_args = sclass.server_version_args()
# Peers have one entry per hostname. Once connected, the
# ip_addr property is either None, an onion peer, or the
# IP address that was connected to. Adding a peer will evict
# any other peers with the same host name or IP address.
self.peers = set()
self.permit_onion_peer_time = time.time()
self.proxy = None
self.group = TaskGroup()
self.recent_peer_adds = {}
# refreshed
self.blacklist = set()
def _my_clearnet_peer(self):
'''Returns the clearnet peer representing this server, if any.'''
clearnet = [peer for peer in self.myselves if not peer.is_tor]
return clearnet[0] if clearnet else None
def _set_peer_statuses(self):
'''Set peer statuses.'''
cutoff = time.time() - STALE_SECS
for peer in self.peers:
if peer.bad:
peer.status = PEER_BAD
elif peer.last_good > cutoff:
peer.status = PEER_GOOD
elif peer.last_good:
peer.status = PEER_STALE
else:
peer.status = PEER_NEVER
def _features_to_register(self, peer, remote_peers):
'''If we should register ourselves to the remote peer, which has
reported the given list of known peers, return the clearnet
identity features to register, otherwise None.
'''
# Announce ourself if not present. Don't if disabled, we
# are a non-public IP address, or to ourselves.
if not self.env.peer_announce or peer in self.myselves:
return None
my = self._my_clearnet_peer()
if not my or not my.is_public:
return None
# Register if no matches, or ports have changed
for peer in my.matches(remote_peers):
if peer.tcp_port == my.tcp_port and peer.ssl_port == my.ssl_port:
return None
return my.features
def _permit_new_onion_peer(self, now):
'''Accept a new onion peer only once per random time interval.'''
if now < self.permit_onion_peer_time:
return False
self.permit_onion_peer_time = now + random.randrange(0, 1200)
return True
async def _import_peers(self):
'''Import hard-coded peers from a file or the coin defaults.'''
imported_peers = self.myselves.copy()
# Add the hard-coded ones unless only reporting ourself
if self.env.peer_discovery != self.env.PD_SELF:
imported_peers.extend(Peer.from_real_name(real_name, 'coins.py')
for real_name in self.env.coin.PEERS)
await self._note_peers(imported_peers, limit=None)
async def _refresh_blacklist(self):
url = self.env.blacklist_url
if not url:
return
async def read_blacklist():
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
return {entry.lower() for entry in json_deserialize(text)}
while True:
try:
self.blacklist = await read_blacklist()
except Exception as e:
self.logger.error(f'could not retrieve blacklist from {url}: {e}')
else:
self.logger.info(f'blacklist from {url} has {len(self.blacklist)} entries')
# Got new blacklist. Now check our current peers against it
for peer in self.peers:
if self._is_blacklisted(peer):
peer.retry_event.set()
await sleep(600)
def _is_blacklisted(self, peer):
host = peer.host.lower()
second_level_domain = '*.' + '.'.join(host.split('.')[-2:])
return any(item in self.blacklist
for item in (host, second_level_domain, peer.ip_addr))
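    # Editorial example (illustrative values): for host "foo.electrum.example.com"
    # the check above tests "foo.electrum.example.com", "*.example.com" and the
    # peer's IP address against the downloaded blacklist entries.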
def _get_recent_good_peers(self):
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
recent = [peer for peer in recent if not self._is_blacklisted(peer)]
return recent
async def _detect_proxy(self):
'''Detect a proxy if we don't have one and some time has passed since
the last attempt.
If found self.proxy is set to a SOCKSProxy instance, otherwise None.
'''
host = self.env.tor_proxy_host
if self.env.tor_proxy_port is None:
ports = [9050, 9150, 1080]
else:
ports = [self.env.tor_proxy_port]
while True:
self.logger.info(f'trying to detect proxy on "{host}" '
f'ports {ports}')
proxy = await SOCKSProxy.auto_detect_at_host(host, ports, None)
if proxy:
self.proxy = proxy
self.logger.info(f'detected {proxy}')
return
self.logger.info('no proxy detected, will try later')
await sleep(900)
async def _note_peers(self, peers, limit=2, check_ports=False, source=None):
'''Add a limited number of peers that are not already present.'''
new_peers = []
match_set = self.peers.copy()
for peer in peers:
if not peer.is_public or (peer.is_tor and not self.proxy):
continue
matches = peer.matches(match_set)
if matches:
if check_ports:
for match in matches:
if match.check_ports(peer):
self.logger.info(f'ports changed for {peer}')
# Retry connecting to the peer. First we will try the existing
# ports and then try the new ports. Note that check_ports above
# had a side_effect to temporarily store the new ports.
# If we manage to connect, we will call 'server.features',
# and the ports for this peer will be updated to the return values.
match.retry_event.set()
else:
match_set.add(peer)
new_peers.append(peer)
if new_peers:
source = source or new_peers[0].source
if limit:
random.shuffle(new_peers)
use_peers = new_peers[:limit]
else:
use_peers = new_peers
for peer in use_peers:
self.logger.info(f'accepted new peer {peer} from {source}')
peer.retry_event = Event()
self.peers.add(peer)
await self.group.spawn(self._monitor_peer(peer))
return True
async def _monitor_peer(self, peer):
# Stop monitoring if we were dropped (a duplicate peer)
while peer in self.peers:
if await self._should_drop_peer(peer):
self.peers.discard(peer)
break
# Figure out how long to sleep before retrying. Retry a
# good connection when it is about to turn stale, otherwise
# exponentially back off retries.
if peer.try_count == 0:
pause = STALE_SECS - WAKEUP_SECS * 2
else:
pause = WAKEUP_SECS * 2 ** peer.try_count
async with ignore_after(pause):
await peer.retry_event.wait()
peer.retry_event.clear()
async def _should_drop_peer(self, peer):
peer.try_count += 1
is_good = False
for kind, port, family in peer.connection_tuples():
peer.last_try = time.time()
kwargs = {'family': family}
if kind == 'SSL':
kwargs['ssl'] = ssl.SSLContext(ssl.PROTOCOL_TLS)
if self.env.force_proxy or peer.is_tor:
if not self.proxy:
return
kwargs['proxy'] = self.proxy
kwargs['resolve'] = not peer.is_tor
else:
# Use our listening Host/IP for outgoing non-proxy
# connections so our peers see the correct source.
local_hosts = {service.host for service in self.env.services
if isinstance(service.host, (IPv4Address, IPv6Address))
and service.protocol != 'rpc'}
if local_hosts:
kwargs['local_addr'] = (str(local_hosts.pop()), None)
peer_text = f'[{peer}:{port} {kind}]'
try:
async with connect_rs(peer.host, port, session_factory=PeerSession,
**kwargs) as session:
session.sent_request_timeout = 120 if peer.is_tor else 30
await self._verify_peer(session, peer)
is_good = True
break
except BadPeerError as e:
self.logger.error(f'{peer_text} marking bad: ({e})')
peer.mark_bad()
break
except CodeMessageError as e:
self.logger.error(f'{peer_text} RPC error: {e.message} '
f'({e.code})')
except (OSError, SOCKSError, ConnectionError, TaskTimeout) as e:
self.logger.info(f'{peer_text} {e}')
if is_good:
# Monotonic time would be better, but last_good and last_try are
# exported to admin RPC client.
now = time.time()
elapsed = now - peer.last_try
self.logger.info(f'{peer_text} verified in {elapsed:.1f}s')
peer.try_count = 0
peer.last_good = now
peer.source = 'peer'
# At most 2 matches if we're a host name, potentially
# several if we're an IP address (several instances
# can share a NAT).
matches = peer.matches(self.peers)
for match in matches:
if match.ip_address:
if len(matches) > 1:
self.peers.remove(match)
# Force the peer's monitoring task to exit
match.retry_event.set()
elif peer.host in match.features['hosts']:
match.update_features_from_peer(peer)
# Trim this data structure
self.recent_peer_adds = {k: v for k, v in self.recent_peer_adds.items()
if v + PEER_ADD_PAUSE < now}
else:
# Forget the peer if long-term unreachable
if peer.last_good and not peer.bad:
try_limit = 10
else:
try_limit = 3
if peer.try_count >= try_limit:
desc = 'bad' if peer.bad else 'unreachable'
self.logger.info(f'forgetting {desc} peer: {peer}')
return True
return False
async def _verify_peer(self, session, peer):
# store IP address for peer
if not peer.is_tor:
address = session.remote_address()
if isinstance(address.host, (IPv4Address, IPv6Address)):
peer.ip_addr = str(address.host)
if self._is_blacklisted(peer):
raise BadPeerError('blacklisted')
# Bucket good recent peers; forbid many servers from similar IPs
# FIXME there's a race here, when verifying multiple peers
# that belong to the same bucket ~simultaneously
recent_peers = self._get_recent_good_peers()
if peer in recent_peers:
recent_peers.remove(peer)
onion_peers = []
buckets = defaultdict(list)
for other_peer in recent_peers:
if other_peer.is_tor:
onion_peers.append(other_peer)
else:
buckets[other_peer.bucket_for_internal_purposes()].append(other_peer)
if peer.is_tor:
# keep number of onion peers below half of all peers,
# but up to 100 is OK regardless
if len(onion_peers) > len(recent_peers) // 2 >= 100:
raise BadPeerError('too many onion peers already')
else:
bucket = peer.bucket_for_internal_purposes()
if buckets[bucket]:
raise BadPeerError(f'too many peers already in bucket {bucket}')
# server.version goes first
message = 'server.version'
try:
result = await session.send_request(message, self.server_version_args)
except asyncio.CancelledError:
raise BadPeerError('terminated before server.version response')
assert_good(message, result, list)
# Protocol version 1.1 returns a pair with the version first
if len(result) != 2 or not all(isinstance(x, str) for x in result):
raise BadPeerError(f'bad server.version result: {result}')
server_version, _protocol_version = result
peer.server_version = server_version
peer.features['server_version'] = server_version
async with TaskGroup() as g:
await g.spawn(self._send_headers_subscribe(session))
await g.spawn(self._send_server_features(session, peer))
peers_task = await g.spawn(self._send_peers_subscribe
(session, peer))
# Process reported peers if remote peer is good
peers = peers_task.result()
await self._note_peers(peers)
features = self._features_to_register(peer, peers)
if features:
self.logger.info(f'registering ourself with {peer}')
# We only care to wait for the response
try:
await session.send_request('server.add_peer', [features])
except asyncio.CancelledError:
raise BadPeerError('terminated before server.add_peer response')
async def _send_headers_subscribe(self, session):
message = 'blockchain.headers.subscribe'
result = await session.send_request(message)
assert_good(message, result, dict)
our_height = self.db.db_height
their_height = result.get('height')
if not isinstance(their_height, int):
raise BadPeerError(f'invalid height {their_height}')
if abs(our_height - their_height) > 5:
raise BadPeerError(f'bad height {their_height:,d} '
f'(ours: {our_height:,d})')
# Check prior header too in case of hard fork.
check_height = min(our_height, their_height)
raw_header = await self.db.raw_header(check_height)
ours = raw_header.hex()
message = 'blockchain.block.header'
theirs = await session.send_request(message, [check_height])
assert_good(message, theirs, str)
if ours != theirs:
raise BadPeerError(f'our header {ours} and '
f'theirs {theirs} differ')
async def _send_server_features(self, session, peer):
message = 'server.features'
features = await session.send_request(message)
assert_good(message, features, dict)
hosts = [host.lower() for host in features.get('hosts', {})]
if self.env.coin.GENESIS_HASH != features.get('genesis_hash'):
raise BadPeerError('incorrect genesis hash')
if peer.host.lower() in hosts:
peer.update_features(features)
else:
raise BadPeerError(f'not listed in own hosts list {hosts}')
async def _send_peers_subscribe(self, session, peer):
message = 'server.peers.subscribe'
raw_peers = await session.send_request(message)
assert_good(message, raw_peers, list)
# Check the peers list we got from a remote peer.
# Each is expected to be of the form:
# [ip_addr, hostname, ['v1.0', 't51001', 's51002']]
# Call add_peer if the remote doesn't appear to know about us.
try:
real_names = [' '.join([u[1]] + u[2]) for u in raw_peers]
return [Peer.from_real_name(real_name, str(peer))
for real_name in real_names]
except Exception:
raise BadPeerError('bad server.peers.subscribe response')
#
# External interface
#
async def discover_peers(self):
'''Perform peer maintenance. This includes
1) Forgetting unreachable peers.
2) Verifying connectivity of new peers.
3) Retrying old peers at regular intervals.
'''
self.logger.info(f'peer discovery: {self.env.peer_discovery}')
if self.env.peer_discovery != self.env.PD_ON:
self.logger.info('peer discovery is disabled')
return
self.logger.info(f'announce ourself: {self.env.peer_announce}')
self.logger.info(f'my clearnet self: {self._my_clearnet_peer()}')
self.logger.info(f'force use of proxy: {self.env.force_proxy}')
self.logger.info(f'beginning peer discovery...')
async with self.group as group:
await group.spawn(self._refresh_blacklist())
await group.spawn(self._detect_proxy())
await group.spawn(self._import_peers())
def info(self):
'''The number of peers.'''
self._set_peer_statuses()
counter = Counter(peer.status for peer in self.peers)
return {
'bad': counter[PEER_BAD],
'good': counter[PEER_GOOD],
'never': counter[PEER_NEVER],
'stale': counter[PEER_STALE],
'total': len(self.peers),
}
async def add_localRPC_peer(self, real_name):
'''Add a peer passed by the admin over LocalRPC.'''
await self._note_peers([Peer.from_real_name(real_name, 'RPC')], check_ports=True)
async def on_add_peer(self, features, source_addr):
'''Add a peer (but only if the peer resolves to the source).'''
if self.env.peer_discovery != self.env.PD_ON:
return False
if not source_addr:
self.logger.info('ignored add_peer request: no source info')
return False
source = str(source_addr.host)
peers = Peer.peers_from_features(features, source)
if not peers:
self.logger.info('ignored add_peer request: no peers given')
return False
# Just look at the first peer, require it
peer = peers[0]
host = peer.host
now = time.time()
# Rate limit peer adds by domain to one every 10 minutes
if peer.ip_address is not None:
bucket = 'ip_addr'
else:
bucket = '.'.join(host.lower().split('.')[-2:])
last = self.recent_peer_adds.get(bucket, 0)
self.recent_peer_adds[bucket] = now
if last + PEER_ADD_PAUSE >= now:
return False
if peer.is_tor:
permit = self._permit_new_onion_peer(now)
reason = 'rate limiting'
else:
getaddrinfo = asyncio.get_event_loop().getaddrinfo
try:
infos = await getaddrinfo(host, 80, type=socket.SOCK_STREAM)
except socket.gaierror:
permit = False
reason = 'address resolution failure'
else:
permit = any(source == info[-1][0] for info in infos)
reason = 'source-destination mismatch'
if permit:
self.logger.info(f'accepted add_peer request from {source} for {host}')
await self._note_peers([peer], check_ports=True)
else:
self.logger.warning(f'rejected add_peer request from {source} '
f'for {host} ({reason})')
return permit
def on_peers_subscribe(self, is_tor):
'''Returns the server peers as a list of (ip, host, details) tuples.
We return all peers we've connected to in the last day.
Additionally, if we don't have onion routing, we return a few
hard-coded onion servers.
'''
recent = self._get_recent_good_peers()
# Always report ourselves if valid (even if not public)
cutoff = time.time() - STALE_SECS
peers = {
myself
for myself in self.myselves
if myself.last_good > cutoff
}
# Bucket the clearnet peers and select up to two from each
onion_peers = []
buckets = defaultdict(list)
for peer in recent:
if peer.is_tor:
onion_peers.append(peer)
else:
buckets[peer.bucket_for_external_interface()].append(peer)
for bucket_peers in buckets.values():
random.shuffle(bucket_peers)
peers.update(bucket_peers[:2])
# Add up to 20% onion peers (but up to 10 is OK anyway)
random.shuffle(onion_peers)
max_onion = 50 if is_tor else max(10, len(peers) // 4)
peers.update(onion_peers[:max_onion])
return [peer.to_tuple() for peer in peers]
def proxy_address(self):
'''Return the NetAddress of the proxy, if there is a proxy, otherwise
None.'''
return self.proxy.address if self.proxy else None
def rpc_data(self):
'''Peer data for the peers RPC method.'''
self._set_peer_statuses()
def peer_data(peer):
data = peer.serialize()
data['status'] = STATUS_DESCS[peer.status]
return data
def peer_key(peer):
return peer.bad, -peer.last_good
return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)]
| 39.996689 | 95 | 0.587011 |
a3a5d3a714e64a0deedd90a5a96c69ea2fbeea0e | 2,698 | py | Python | tests/cluster/check_finite_subcluster.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | ["Apache-2.0"] | null | null | null | tests/cluster/check_finite_subcluster.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | ["Apache-2.0"] | null | null | null | tests/cluster/check_finite_subcluster.py | rancp/ducktape-docs | e1a3b1b7e68beedf5f8d29a4e5f196912a20e264 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.cluster.finite_subcluster import FiniteSubcluster
from ducktape.services.service import Service
from ducktape.cluster.remoteaccount import RemoteAccount
import pickle
import pytest
class MockFiniteSubclusterNode:
@property
def operating_system(self):
return RemoteAccount.LINUX
class CheckFiniteSubcluster(object):
single_node_cluster_json = {"nodes": [{"hostname": "localhost"}]}
def check_cluster_size(self):
cluster = FiniteSubcluster([])
assert len(cluster) == 0
n = 10
cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
assert len(cluster) == n
def check_pickleable(self):
cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(10)])
pickle.dumps(cluster)
def check_allocate_free(self):
n = 10
cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
assert len(cluster) == n
assert cluster.num_available_nodes() == n
nodes = cluster.alloc(Service.setup_node_spec(num_nodes=1))
assert len(nodes) == 1
assert len(cluster) == n
assert cluster.num_available_nodes() == n - 1
nodes2 = cluster.alloc(Service.setup_node_spec(num_nodes=2))
assert len(nodes2) == 2
assert len(cluster) == n
assert cluster.num_available_nodes() == n - 3
cluster.free(nodes)
assert cluster.num_available_nodes() == n - 2
cluster.free(nodes2)
assert cluster.num_available_nodes() == n
def check_alloc_too_many(self):
n = 10
cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
with pytest.raises(AssertionError):
cluster.alloc(Service.setup_node_spec(num_nodes=(n + 1)))
def check_free_too_many(self):
n = 10
cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(n)])
nodes = cluster.alloc(Service.setup_node_spec(num_nodes=n))
with pytest.raises(AssertionError):
nodes.append(object())
cluster.free(nodes)
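    # Rough usage sketch of the API exercised by these checks (assumed wiring, not
    # part of the test suite):
    #   cluster = FiniteSubcluster([MockFiniteSubclusterNode() for _ in range(5)])
    #   nodes = cluster.alloc(Service.setup_node_spec(num_nodes=2))
    #   ...  # run the service on the allocated nodes
    #   cluster.free(nodes)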
| 34.589744 | 83 | 0.687546 |
cf2e2c2b22fad7ce82a074e4d7210f28e4b4eaf2 | 2,295 | py | Python | tests/reporter_test.py | vardaan-raj/rally | ec04919d3b44c1e694ae5bc2c9a7901cf31b8e89 | ["Apache-2.0"] | null | null | null | tests/reporter_test.py | vardaan-raj/rally | ec04919d3b44c1e694ae5bc2c9a7901cf31b8e89 | ["Apache-2.0"] | null | null | null | tests/reporter_test.py | vardaan-raj/rally | ec04919d3b44c1e694ae5bc2c9a7901cf31b8e89 | ["Apache-2.0"] | null | null | null |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import TestCase
from esrally import reporter
class FormatterTests(TestCase):
def setUp(self):
self.empty_header = ["Header"]
self.empty_data = []
self.metrics_header = ["Metric", "Task", "Baseline", "Contender", "Diff", "Unit"]
self.metrics_data = [
["Min Throughput", "index", "17300", "18000", "700", "ops/s"],
["Median Throughput", "index", "17500", "18500", "1000", "ops/s"],
["Max Throughput", "index", "17700", "19000", "1300", "ops/s"],
]
self.numbers_align = "right"
def test_formats_as_markdown(self):
formatted = reporter.format_as_markdown(self.empty_header, self.empty_data, self.numbers_align)
# 1 header line, 1 separation line + 0 data lines
self.assertEqual(1 + 1 + 0, len(formatted.splitlines()))
formatted = reporter.format_as_markdown(self.metrics_header, self.metrics_data, self.numbers_align)
# 1 header line, 1 separation line + 3 data lines
self.assertEqual(1 + 1 + 3, len(formatted.splitlines()))
def test_formats_as_csv(self):
formatted = reporter.format_as_csv(self.empty_header, self.empty_data)
# 1 header line, no separation line + 0 data lines
self.assertEqual(1 + 0, len(formatted.splitlines()))
formatted = reporter.format_as_csv(self.metrics_header, self.metrics_data)
# 1 header line, no separation line + 3 data lines
self.assertEqual(1 + 3, len(formatted.splitlines()))
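    # For orientation only (assumed output shape, not asserted by these tests): the
    # markdown formatter is expected to emit one header row, one separator row, then
    # one row per data entry, e.g.
    #   | Metric         | Task  | Baseline | Contender | Diff | Unit  |
    #   |---------------:|------:|---------:|----------:|-----:|------:|
    #   | Min Throughput | index |    17300 |     18000 |  700 | ops/s |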
| 43.301887 | 107 | 0.688889 |
c5e36527e52e231f3c914ff61d8f65896451843b | 2,288 | py | Python | samples/AddAdobeIDUser.py | Luci2015/umapi-documentation | a7eb376ffe13e6681f7ae8e314c60e803cc04fe6 | ["MIT"] | 7 | 2017-11-07T23:31:10.000Z | 2021-05-09T07:26:16.000Z | samples/AddAdobeIDUser.py | Luci2015/umapi-documentation | a7eb376ffe13e6681f7ae8e314c60e803cc04fe6 | ["MIT"] | 13 | 2017-09-12T16:48:28.000Z | 2022-03-24T11:55:28.000Z | samples/AddAdobeIDUser.py | Luci2015/umapi-documentation | a7eb376ffe13e6681f7ae8e314c60e803cc04fe6 | ["MIT"] | 17 | 2017-06-28T14:12:59.000Z | 2021-10-04T17:06:07.000Z |
#!/usr/bin/python
#*************************************************************************
#
# ADOBE CONFIDENTIAL
# ___________________
#
# Copyright 2017 Adobe Systems Incorporated
# All Rights Reserved.
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in
# accordance with the terms of the Adobe license agreement accompanying it.
# If you have received this file from a source other than Adobe, then your
# use, modification, or distribution of it requires the prior written
# permission of Adobe.
#**************************************************************************
import sys
if sys.version_info[0] == 2:
from ConfigParser import RawConfigParser
if sys.version_info[0] >= 3:
from configparser import RawConfigParser
import json
import requests
import random
import string
# read configuration file
config_file_name = "usermanagement.config"
config = RawConfigParser()
config.read(config_file_name)
# server parameters
host = config.get("server", "host")
endpoint = config.get("server", "endpoint")
# enterprise parameters
domain = config.get("enterprise", "domain")
org_id = config.get("enterprise", "org_id")
api_key = config.get("enterprise", "api_key")
access_token = config.get("enterprise", "access_token")
user_string = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12))
# method parameters
url = "https://" + host + endpoint + "/action/" + org_id
headers = {
"Content-type" : "application/json",
"Accept" : "application/json",
"x-api-key" : api_key,
"Authorization" : "Bearer " + access_token
}
json_data = \
[
{
"user" : user_string + "@" + domain,
"do" : [
{
"addAdobeID" : {
"email" : user_string + "@" + domain,
"country" : "US"
}
}
]
}
]
# prepare body
body = json.dumps(json_data)
print("Sending following request body to User Management Action API: " + body)
# send http request
res = requests.post(url, headers=headers, data=body)
# print response
print(res.status_code)
print(res.headers)
print(res.text)
# parse response body
if res.status_code == 200:
res_json_data = json.loads(res.text)
result = res_json_data["result"]
if result == "success":
print("Success");
exit(res.status_code)
| 26.298851 | 95 | 0.649913 |
16891fa4456acf2ca880d86fcab14dc1c8637cff | 2,776 | py | Python | 10.3389/fevo.2021.762173/scripts/print_image_capure_dates.py | jqsunac/doi | c5912a40c7bfda8270e5d51fbdd82a9f0650bd23 | ["MIT"] | null | null | null | 10.3389/fevo.2021.762173/scripts/print_image_capure_dates.py | jqsunac/doi | c5912a40c7bfda8270e5d51fbdd82a9f0650bd23 | ["MIT"] | null | null | null | 10.3389/fevo.2021.762173/scripts/print_image_capure_dates.py | jqsunac/doi | c5912a40c7bfda8270e5d51fbdd82a9f0650bd23 | ["MIT"] | null | null | null |
import os
import sys
import glob
from PIL import Image
from PIL import ExifTags
from PIL.ExifTags import TAGS
# pwd
# # ~/projects/dragonfly/data/dataset_T
# python ../../scripts/print_image_capure_dates.py cropped_image
# python ../../scripts/print_image_capure_dates.py raw
def get_gps(fpath):
im = Image.open(fpath)
lat = None
lng = None
try:
exif = im._getexif()
exif = {
ExifTags.TAGS[k]: v for k, v in exif.items() if k in ExifTags.TAGS
}
if 'GPSInfo' in exif:
gps_tags = exif['GPSInfo']
gps = {
ExifTags.GPSTAGS.get(t, t): gps_tags[t] for t in gps_tags
}
is_lat = 'GPSLatitude' in gps
is_lat_ref = 'GPSLatitudeRef' in gps
is_lng = 'GPSLongitude' in gps
is_lng_ref = 'GPSLongitudeRef' in gps
if is_lat and is_lat_ref and is_lng and is_lng_ref:
lat = gps['GPSLatitude']
lat_ref = gps['GPSLatitudeRef']
if lat_ref == 'N':
lat_sign = 1.0
elif lat_ref == 'S':
lat_sign = -1.0
lng = gps['GPSLongitude']
lng_ref = gps['GPSLongitudeRef']
if lng_ref == 'E':
lng_sign = 1.0
elif lng_ref == 'W':
lng_sign = -1.0
                # apply the hemisphere sign to the whole degrees + minutes + seconds value
                lat = lat_sign * (lat[0] + lat[1] / 60 + lat[2] / 3600)
                lng = lng_sign * (lng[0] + lng[1] / 60 + lng[2] / 3600)
except:
pass
# print(fpath + ' has no EXIF!')
return lat, lng
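# Quick sanity check of the DMS-to-decimal conversion above (hypothetical values, not part
# of the original script): 35 deg 30' 36" South -> -1.0 * (35 + 30 / 60 + 36 / 3600) = -35.51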
def get_captured_datetime(fpath):
captured_datetime = None
im = Image.open(fpath)
exif = im._getexif()
try:
for exif_id, exif_val in exif.items():
tag = TAGS.get(exif_id, exif_id)
if tag == 'DateTimeOriginal':
captured_datetime = exif_val
except:
pass
# print(fpath + ' has no EXIF!')
im.close()
return captured_datetime
def main(target_path):
n_images = 0
n_noexif = 0
datetimes = []
for fpath in glob.glob(os.path.join(target_path, '*', '*.jpg')):
n_images += 1
datetime = get_captured_datetime(fpath)
gps = get_gps(fpath)
if gps[0] is not None and datetime is not None:
datetimes.append(datetime)
print('{}\t{}\t{}\t{}'.format(fpath, datetime, gps[0], gps[1]))
else:
n_noexif += 1
print('#{}-{}'.format(sorted(datetimes)[0], sorted(datetimes)[-1]))
print('#{}, {}, {}'.format(n_images, n_noexif, n_images - n_noexif))
if __name__ == '__main__':
target_path = sys.argv[1]
main(target_path)
| 27.215686 | 79 | 0.524135 |
e205b0b7c9b53a2fe3c0ea438ae76670c28234f5 | 9,044 | py | Python | sdk/python/pulumi_google_native/compute/beta/get_http_health_check.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/compute/beta/get_http_health_check.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/compute/beta/get_http_health_check.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | ["Apache-2.0"] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetHttpHealthCheckResult',
'AwaitableGetHttpHealthCheckResult',
'get_http_health_check',
'get_http_health_check_output',
]
@pulumi.output_type
class GetHttpHealthCheckResult:
def __init__(__self__, check_interval_sec=None, creation_timestamp=None, description=None, healthy_threshold=None, host=None, kind=None, name=None, port=None, request_path=None, self_link=None, timeout_sec=None, unhealthy_threshold=None):
if check_interval_sec and not isinstance(check_interval_sec, int):
raise TypeError("Expected argument 'check_interval_sec' to be a int")
pulumi.set(__self__, "check_interval_sec", check_interval_sec)
if creation_timestamp and not isinstance(creation_timestamp, str):
raise TypeError("Expected argument 'creation_timestamp' to be a str")
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if healthy_threshold and not isinstance(healthy_threshold, int):
raise TypeError("Expected argument 'healthy_threshold' to be a int")
pulumi.set(__self__, "healthy_threshold", healthy_threshold)
if host and not isinstance(host, str):
raise TypeError("Expected argument 'host' to be a str")
pulumi.set(__self__, "host", host)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if request_path and not isinstance(request_path, str):
raise TypeError("Expected argument 'request_path' to be a str")
pulumi.set(__self__, "request_path", request_path)
if self_link and not isinstance(self_link, str):
raise TypeError("Expected argument 'self_link' to be a str")
pulumi.set(__self__, "self_link", self_link)
if timeout_sec and not isinstance(timeout_sec, int):
raise TypeError("Expected argument 'timeout_sec' to be a int")
pulumi.set(__self__, "timeout_sec", timeout_sec)
if unhealthy_threshold and not isinstance(unhealthy_threshold, int):
raise TypeError("Expected argument 'unhealthy_threshold' to be a int")
pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold)
@property
@pulumi.getter(name="checkIntervalSec")
def check_interval_sec(self) -> int:
"""
How often (in seconds) to send a health check. The default value is 5 seconds.
"""
return pulumi.get(self, "check_interval_sec")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> str:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> str:
"""
An optional description of this resource. Provide this property when you create the resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="healthyThreshold")
def healthy_threshold(self) -> int:
"""
A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2.
"""
return pulumi.get(self, "healthy_threshold")
@property
@pulumi.getter
def host(self) -> str:
"""
The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used.
"""
return pulumi.get(self, "host")
@property
@pulumi.getter
def kind(self) -> str:
"""
Type of the resource. Always compute#httpHealthCheck for HTTP health checks.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> int:
"""
The TCP port number for the HTTP health check request. The default value is 80.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="requestPath")
def request_path(self) -> str:
"""
The request path of the HTTP health check request. The default value is /. This field does not support query parameters.
"""
return pulumi.get(self, "request_path")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> str:
"""
Server-defined URL for the resource.
"""
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="timeoutSec")
def timeout_sec(self) -> int:
"""
How long (in seconds) to wait before claiming failure. The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec.
"""
return pulumi.get(self, "timeout_sec")
@property
@pulumi.getter(name="unhealthyThreshold")
def unhealthy_threshold(self) -> int:
"""
A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2.
"""
return pulumi.get(self, "unhealthy_threshold")
class AwaitableGetHttpHealthCheckResult(GetHttpHealthCheckResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHttpHealthCheckResult(
check_interval_sec=self.check_interval_sec,
creation_timestamp=self.creation_timestamp,
description=self.description,
healthy_threshold=self.healthy_threshold,
host=self.host,
kind=self.kind,
name=self.name,
port=self.port,
request_path=self.request_path,
self_link=self.self_link,
timeout_sec=self.timeout_sec,
unhealthy_threshold=self.unhealthy_threshold)
def get_http_health_check(http_health_check: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHttpHealthCheckResult:
"""
Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.
"""
__args__ = dict()
__args__['httpHealthCheck'] = http_health_check
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:compute/beta:getHttpHealthCheck', __args__, opts=opts, typ=GetHttpHealthCheckResult).value
return AwaitableGetHttpHealthCheckResult(
check_interval_sec=__ret__.check_interval_sec,
creation_timestamp=__ret__.creation_timestamp,
description=__ret__.description,
healthy_threshold=__ret__.healthy_threshold,
host=__ret__.host,
kind=__ret__.kind,
name=__ret__.name,
port=__ret__.port,
request_path=__ret__.request_path,
self_link=__ret__.self_link,
timeout_sec=__ret__.timeout_sec,
unhealthy_threshold=__ret__.unhealthy_threshold)
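# Illustrative call pattern for the function above (resource and project names are
# hypothetical, not part of the generated module):
#   check = get_http_health_check(http_health_check="my-check", project="my-project")
#   pulumi.export("request_path", check.request_path)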
@_utilities.lift_output_func(get_http_health_check)
def get_http_health_check_output(http_health_check: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHttpHealthCheckResult]:
"""
Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.
"""
...
| 42.460094 | 444 | 0.66851 |
daec10219060314083d5dcdf96b05befbd5a2b21 | 12,165 | py | Python | examples/terran/ramp_wall.py | DrInfy/python-sc2 | c08b06b19dee8fd973dec9a2e2383f3ab76d8e77 | ["MIT"] | 1 | 2021-03-13T08:25:51.000Z | 2021-03-13T08:25:51.000Z | examples/terran/ramp_wall.py | DrInfy/python-sc2 | c08b06b19dee8fd973dec9a2e2383f3ab76d8e77 | ["MIT"] | 1 | 2021-11-11T11:51:12.000Z | 2021-11-11T11:51:12.000Z | examples/terran/ramp_wall.py | DrInfy/python-sc2 | c08b06b19dee8fd973dec9a2e2383f3ab76d8e77 | ["MIT"] | 1 | 2021-11-11T11:48:45.000Z | 2021-11-11T11:48:45.000Z |
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../.."))
import random, numpy as np
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from sc2.position import Point2, Point3
from sc2.unit import Unit
from sc2.units import Units
from typing import List, Set
class RampWallBot(sc2.BotAI):
def __init__(self):
self.unit_command_uses_self_do = False
async def on_step(self, iteration):
ccs: Units = self.townhalls(UnitTypeId.COMMANDCENTER)
if not ccs:
return
else:
cc: Unit = ccs.first
await self.distribute_workers()
if self.can_afford(UnitTypeId.SCV) and self.workers.amount < 16 and cc.is_idle:
self.do(cc.train(UnitTypeId.SCV))
        # Lower raised depots when no enemies are nearby
for depo in self.structures(UnitTypeId.SUPPLYDEPOT).ready:
for unit in self.enemy_units:
if unit.distance_to(depo) < 15:
break
else:
depo(AbilityId.MORPH_SUPPLYDEPOT_LOWER)
        # Raise lowered depots when enemies come nearby
for depo in self.structures(UnitTypeId.SUPPLYDEPOTLOWERED).ready:
for unit in self.enemy_units:
if unit.distance_to(depo) < 10:
depo(AbilityId.MORPH_SUPPLYDEPOT_RAISE)
break
# Draw ramp points
self.draw_ramp_points()
# Draw all detected expansions on the map
self.draw_expansions()
# # Draw pathing grid
# self.draw_pathing_grid()
# Draw placement grid
# self.draw_placement_grid()
# Draw vision blockers
# self.draw_vision_blockers()
# Draw visibility pixelmap for debugging purposes
# self.draw_visibility_pixelmap()
# Draw some example boxes around units, lines towards command center, text on the screen and barracks
# self.draw_example()
# Draw if two selected units are facing each other - green if this guy is facing the other, red if he is not
# self.draw_facing_units()
depot_placement_positions: Set[Point2] = self.main_base_ramp.corner_depots
# Uncomment the following if you want to build 3 supply depots in the wall instead of a barracks in the middle + 2 depots in the corner
# depot_placement_positions = self.main_base_ramp.corner_depots | {self.main_base_ramp.depot_in_middle}
barracks_placement_position: Point2 = self.main_base_ramp.barracks_correct_placement
# If you prefer to have the barracks in the middle without room for addons, use the following instead
# barracks_placement_position = self.main_base_ramp.barracks_in_middle
depots: Units = self.structures.of_type({UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED})
# Filter locations close to finished supply depots
if depots:
depot_placement_positions: Set[Point2] = {
d for d in depot_placement_positions if depots.closest_distance_to(d) > 1
}
# Build depots
if self.can_afford(UnitTypeId.SUPPLYDEPOT) and self.already_pending(UnitTypeId.SUPPLYDEPOT) == 0:
if len(depot_placement_positions) == 0:
return
# Choose any depot location
target_depot_location: Point2 = depot_placement_positions.pop()
workers: Units = self.workers.gathering
if workers: # if workers were found
worker: Unit = workers.random
self.do(worker.build(UnitTypeId.SUPPLYDEPOT, target_depot_location))
# Build barracks
if depots.ready and self.can_afford(UnitTypeId.BARRACKS) and self.already_pending(UnitTypeId.BARRACKS) == 0:
if self.structures(UnitTypeId.BARRACKS).amount + self.already_pending(UnitTypeId.BARRACKS) > 0:
return
workers = self.workers.gathering
if workers and barracks_placement_position: # if workers were found
worker: Unit = workers.random
worker.build(UnitTypeId.BARRACKS, barracks_placement_position)
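    # Note (summary only, no additional logic): the wall layout above relies on python-sc2's
    # ramp helpers - corner_depots gives the two depot corners and barracks_correct_placement
    # leaves room for a barracks addon.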
async def on_building_construction_started(self, unit: Unit):
print(f"Construction of building {unit} started at {unit.position}.")
async def on_building_construction_complete(self, unit: Unit):
print(f"Construction of building {unit} completed at {unit.position}.")
def draw_ramp_points(self):
for ramp in self.game_info.map_ramps:
for p in ramp.points:
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
color = Point3((255, 0, 0))
if p in ramp.upper:
color = Point3((0, 255, 0))
if p in ramp.upper2_for_ramp_wall:
color = Point3((0, 255, 255))
if p in ramp.lower:
color = Point3((0, 0, 255))
self._client.debug_box2_out(pos + Point2((0.5, 0.5)), half_vertex_length=0.25, color=color)
# Identical to above:
# p0 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z + 0.25))
# p1 = Point3((pos.x + 0.75, pos.y + 0.75, pos.z - 0.25))
# print(f"Drawing {p0} to {p1}")
# self._client.debug_box_out(p0, p1, color=color)
def draw_expansions(self):
green = Point3((0, 255, 0))
for expansion_pos in self.expansion_locations_list:
height = self.get_terrain_z_height(expansion_pos)
expansion_pos3 = Point3((*expansion_pos, height))
self._client.debug_box2_out(expansion_pos3, half_vertex_length=2.5, color=green)
def draw_pathing_grid(self):
map_area = self._game_info.playable_area
for (b, a), value in np.ndenumerate(self._game_info.pathing_grid.data_numpy):
if value == 0:
continue
# Skip values outside of playable map area
if not (map_area.x <= a < map_area.x + map_area.width):
continue
if not (map_area.y <= b < map_area.y + map_area.height):
continue
p = Point2((a, b))
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
p0 = Point3((pos.x - 0.25, pos.y - 0.25, pos.z + 0.25)) + Point2((0.5, 0.5))
p1 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z - 0.25)) + Point2((0.5, 0.5))
# print(f"Drawing {p0} to {p1}")
color = Point3((0, 255, 0))
self._client.debug_box_out(p0, p1, color=color)
def draw_placement_grid(self):
map_area = self._game_info.playable_area
for (b, a), value in np.ndenumerate(self._game_info.placement_grid.data_numpy):
if value == 0:
continue
# Skip values outside of playable map area
if not (map_area.x <= a < map_area.x + map_area.width):
continue
if not (map_area.y <= b < map_area.y + map_area.height):
continue
p = Point2((a, b))
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
p0 = Point3((pos.x - 0.25, pos.y - 0.25, pos.z + 0.25)) + Point2((0.5, 0.5))
p1 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z - 0.25)) + Point2((0.5, 0.5))
# print(f"Drawing {p0} to {p1}")
color = Point3((0, 255, 0))
self._client.debug_box_out(p0, p1, color=color)
def draw_vision_blockers(self):
for p in self.game_info.vision_blockers:
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
p0 = Point3((pos.x - 0.25, pos.y - 0.25, pos.z + 0.25)) + Point2((0.5, 0.5))
p1 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z - 0.25)) + Point2((0.5, 0.5))
# print(f"Drawing {p0} to {p1}")
color = Point3((255, 0, 0))
self._client.debug_box_out(p0, p1, color=color)
def draw_visibility_pixelmap(self):
for (y, x), value in np.ndenumerate(self.state.visibility.data_numpy):
p = Point2((x, y))
h2 = self.get_terrain_z_height(p)
pos = Point3((p.x, p.y, h2))
p0 = Point3((pos.x - 0.25, pos.y - 0.25, pos.z + 0.25)) + Point2((0.5, 0.5))
p1 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z - 0.25)) + Point2((0.5, 0.5))
# Red
color = Point3((255, 0, 0))
# If value == 2: show green (= we have vision on that point)
if value == 2:
color = Point3((0, 255, 0))
self._client.debug_box_out(p0, p1, color=color)
def draw_example(self):
# Draw green boxes around SCVs if they are gathering, yellow if they are returning cargo, red the rest
scv: Unit
for scv in self.workers:
pos = scv.position3d
p0 = Point3((pos.x - 0.25, pos.y - 0.25, pos.z + 0.25))
p1 = Point3((pos.x + 0.25, pos.y + 0.25, pos.z - 0.25))
# Red
color = Point3((255, 0, 0))
if scv.is_gathering:
color = Point3((0, 255, 0))
elif scv.is_returning:
color = Point3((255, 255, 0))
self._client.debug_box_out(p0, p1, color=color)
# Draw lines from structures to command center
if self.townhalls:
cc = self.townhalls[0]
p0 = cc.position3d
if not self.structures:
return
structure: Unit
for structure in self.structures:
if structure == cc:
continue
p1 = structure.position3d
# Red
color = Point3((255, 0, 0))
self._client.debug_line_out(p0, p1, color=color)
# Draw text on barracks
if structure.type_id == UnitTypeId.BARRACKS:
# Blue
color = Point3((0, 0, 255))
pos = structure.position3d + Point3((0, 0, 0.5))
# TODO: Why is this text flickering
self._client.debug_text_world(text="MY RAX", pos=pos, color=color, size=16)
# Draw text in top left of screen
self._client.debug_text_screen(text="Hello world!", pos=Point2((0, 0)), color=None, size=16)
self._client.debug_text_simple(text="Hello world2!")
def draw_facing_units(self):
""" Draws green box on top of selected_unit2, if selected_unit2 is facing selected_unit1 """
selected_unit1: Unit
selected_unit2: Unit
red = Point3((255, 0, 0))
green = Point3((0, 255, 0))
for selected_unit1 in (self.units | self.structures).selected:
for selected_unit2 in self.units.selected:
if selected_unit1 == selected_unit2:
continue
if selected_unit2.is_facing_unit(selected_unit1):
self._client.debug_box2_out(selected_unit2, half_vertex_length=0.25, color=green)
else:
self._client.debug_box2_out(selected_unit2, half_vertex_length=0.25, color=red)
def main():
map = random.choice(
[
# Most maps have 2 upper points at the ramp (len(self.main_base_ramp.upper) == 2)
"AutomatonLE",
"BlueshiftLE",
"CeruleanFallLE",
"KairosJunctionLE",
"ParaSiteLE",
"PortAleksanderLE",
"StasisLE",
"DarknessSanctuaryLE",
"ParaSiteLE", # Has 5 upper points at the main ramp
"AcolyteLE", # Has 4 upper points at the ramp to the in-base natural and 2 upper points at the small ramp
"HonorgroundsLE", # Has 4 or 9 upper points at the large main base ramp
]
)
map = "PillarsofGoldLE"
sc2.run_game(
sc2.maps.get(map),
[Bot(Race.Terran, RampWallBot()), Computer(Race.Zerg, Difficulty.Hard)],
realtime=True,
# sc2_version="4.10.1",
)
if __name__ == "__main__":
main()
| 42.093426 | 143 | 0.5806 |
2b966e97d8a4b69af43efd3160e0d5484dfbf01e | 23,862 | py | Python | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/operations/_my_workbooks_operations.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | ["MIT"] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/operations/_my_workbooks_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2015_05_01/aio/operations/_my_workbooks_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | ["MIT"] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MyWorkbooksOperations:
"""MyWorkbooksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.applicationinsights.v2015_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_resource_group(
self,
resource_group_name: str,
category: Union[str, "_models.CategoryType"],
tags: Optional[List[str]] = None,
can_fetch_content: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.MyWorkbooksListResult"]:
"""Get all private workbooks defined within a specified resource group and category.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param category: Category of workbook to return.
:type category: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.CategoryType
:param tags: Tags presents on each workbook returned.
:type tags: list[str]
:param can_fetch_content: Flag indicating whether or not to return the full content for each
applicable workbook. If false, only return summary content for workbooks.
:type can_fetch_content: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MyWorkbooksListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbooksListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MyWorkbooksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['category'] = self._serialize.query("category", category, 'str')
if tags is not None:
query_parameters['tags'] = self._serialize.query("tags", tags, '[str]', div=',')
if can_fetch_content is not None:
query_parameters['canFetchContent'] = self._serialize.query("can_fetch_content", can_fetch_content, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MyWorkbooksListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/myWorkbooks'} # type: ignore
def list_by_subscription(
self,
category: Union[str, "_models.CategoryType"],
tags: Optional[List[str]] = None,
can_fetch_content: Optional[bool] = None,
**kwargs: Any
) -> AsyncIterable["_models.MyWorkbooksListResult"]:
"""Get all private workbooks defined within a specified subscription and category.
:param category: Category of workbook to return.
:type category: str or ~azure.mgmt.applicationinsights.v2015_05_01.models.CategoryType
:param tags: Tags presents on each workbook returned.
:type tags: list[str]
:param can_fetch_content: Flag indicating whether or not to return the full content for each
applicable workbook. If false, only return summary content for workbooks.
:type can_fetch_content: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MyWorkbooksListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbooksListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MyWorkbooksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['category'] = self._serialize.query("category", category, 'str')
if tags is not None:
query_parameters['tags'] = self._serialize.query("tags", tags, '[str]', div=',')
if can_fetch_content is not None:
query_parameters['canFetchContent'] = self._serialize.query("can_fetch_content", can_fetch_content, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MyWorkbooksListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Insights/myWorkbooks'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.MyWorkbook":
"""Get a single private workbook by its resourceName.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MyWorkbook, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbook
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MyWorkbook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MyWorkbook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/myWorkbooks/{resourceName}'} # type: ignore
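    # Illustrative call pattern (client wiring assumed, not part of the generated file):
    #   workbook = await client.my_workbooks.get(resource_group_name="my-rg", resource_name="my-workbook")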
async def delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> None:
"""Delete a private workbook.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/myWorkbooks/{resourceName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
workbook_properties: "_models.MyWorkbook",
**kwargs: Any
) -> "_models.MyWorkbook":
"""Create a new private workbook.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param workbook_properties: Properties that need to be specified to create a new private
workbook.
:type workbook_properties: ~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbook
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MyWorkbook, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbook
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MyWorkbook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(workbook_properties, 'MyWorkbook')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MyWorkbook', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('MyWorkbook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/myWorkbooks/{resourceName}'} # type: ignore
async def update(
self,
resource_group_name: str,
resource_name: str,
workbook_properties: "_models.MyWorkbook",
**kwargs: Any
) -> "_models.MyWorkbook":
"""Updates a private workbook that has already been added.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the Application Insights component resource.
:type resource_name: str
:param workbook_properties: Properties that need to be specified to create a new private
workbook.
:type workbook_properties: ~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbook
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MyWorkbook, or the result of cls(response)
:rtype: ~azure.mgmt.applicationinsights.v2015_05_01.models.MyWorkbook
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MyWorkbook"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(workbook_properties, 'MyWorkbook')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.MyWorkbookError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MyWorkbook', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/myWorkbooks/{resourceName}'} # type: ignore
| 51.206009 | 181 | 0.669684 |
a1de296141d5a10278c5889388a542d96b25270d | 1,784 | py | Python | DistributionTools/DataPipeline/Windows/freeze.py | shambo001/peat | 7a26e896aa9914b084a9064df09ed15df4047cf3 | ["MIT"] | 3 | 2016-11-11T06:11:03.000Z | 2021-09-12T22:13:51.000Z | DistributionTools/DataPipeline/Windows/freeze.py | shambo001/peat | 7a26e896aa9914b084a9064df09ed15df4047cf3 | ["MIT"] | null | null | null | DistributionTools/DataPipeline/Windows/freeze.py | shambo001/peat | 7a26e896aa9914b084a9064df09ed15df4047cf3 | ["MIT"] | 2 | 2016-02-15T16:10:36.000Z | 2018-02-27T10:33:21.000Z |
#!/usr/bin/env python
#bbfreeze setup file for DataPipeline distribution on Windows
#Damien Farrell, #Nov 2011
"""
This script can be used to create a standalone executable for
either Windows or Linux. It must be run on the target platform.
You will need to install bbfreeze, see http://pypi.python.org/pypi/bbfreeze/
"""
from bbfreeze import Freezer
import sys, os, shutil
shutil.rmtree('datapipeline', ignore_errors=True)
path=os.path.abspath('../../..')
pipepath=os.path.abspath('../../../DataPipeline')
peatpath=os.path.abspath('../../../PEATDB')
version = '1.2'
f = Freezer('datapipeline', excludes=('wx'))
f.addScript(os.path.join(pipepath, "PipelineApp.py"))
f.addScript(os.path.join(pipepath, "PipelineCommand.py"))
f.addScript(os.path.join(peatpath, "Ekin/ModelDesign.py"))
f.addScript(os.path.join(peatpath, "Ekin/Ekin_main.py"))
#these lines allow the plugins to work
f.addModule('PEATDB.PEATApp')
m=f.mf
f() # runs the freezing process
'''post freeze'''
#mpl data
import matplotlib
mpldir = matplotlib.get_data_path()
datadir = 'datapipeline/mpl-data'
shutil.copytree(mpldir, datadir)
#add resource files
resources = ['DataPipeline/app.ico',
'DataPipeline/modeldesign.ico',
'PEATDB/Ekin/Ekin.ico',
'PEATDB/Ekin/models.dict']
for r in resources:
shutil.copy(os.path.join(path, r), 'datapipeline')
tst = 'DataPipeline/testfiles'
shutil.copytree(os.path.join(path, tst), 'datapipeline/testfiles')
#make zip archive
import zipfile
f = zipfile.ZipFile("datapipeline-1.0.zip", "w")
for dirpath, dirnames, filenames in os.walk('datapipeline'):
for fname in filenames:
fullname = os.path.join(dirpath, fname)
f.write(fullname)
f.close()
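# Alternative sketch (not used by the original script): shutil.make_archive can build the
# same archive in one call.
#   shutil.make_archive('datapipeline-1.0', 'zip', root_dir='.', base_dir='datapipeline')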
| 30.237288 | 77 | 0.688341 |
5b5acbdbe12535f3d787b1bcbadb24521f45c0e7 | 2,587 | py | Python | vsts/vsts/build/v4_1/models/definition_reference.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null | vsts/vsts/build/v4_1/models/definition_reference.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null | vsts/vsts/build/v4_1/models/definition_reference.py | kenkuo/azure-devops-python-api | 9e920bd25e938fa89ff7f60153e5b9e113ca839d | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class DefinitionReference(Model):
"""DefinitionReference.
:param created_date: The date the definition was created.
:type created_date: datetime
:param id: The ID of the referenced definition.
:type id: int
:param name: The name of the referenced definition.
:type name: str
:param path: The folder path of the definition.
:type path: str
:param project: A reference to the project.
:type project: :class:`TeamProjectReference <build.v4_1.models.TeamProjectReference>`
:param queue_status: A value that indicates whether builds can be queued against this definition.
:type queue_status: object
:param revision: The definition revision number.
:type revision: int
:param type: The type of the definition.
:type type: object
:param uri: The definition's URI.
:type uri: str
:param url: The REST URL of the definition.
:type url: str
"""
_attribute_map = {
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
'project': {'key': 'project', 'type': 'TeamProjectReference'},
'queue_status': {'key': 'queueStatus', 'type': 'object'},
'revision': {'key': 'revision', 'type': 'int'},
'type': {'key': 'type', 'type': 'object'},
'uri': {'key': 'uri', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, created_date=None, id=None, name=None, path=None, project=None, queue_status=None, revision=None, type=None, uri=None, url=None):
super(DefinitionReference, self).__init__()
self.created_date = created_date
self.id = id
self.name = name
self.path = path
self.project = project
self.queue_status = queue_status
self.revision = revision
self.type = type
self.uri = uri
self.url = url
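    # Illustrative construction (hypothetical values, not part of the generated model):
    #   ref = DefinitionReference(id=42, name="nightly-build", path="\\Folder", revision=7)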
| 41.725806 | 152 | 0.565133 |
a089561d1635765689a84e338ced4ad78c012d4a | 12,781 | py | Python | switch_model/hawaii/reserves.py | DesmondZhong/switch | 88abc164128b6a7345c7aa8806e2b37f74de54fa | ["ECL-2.0", "Apache-2.0"] | 100 | 2015-06-30T02:33:39.000Z | 2022-02-07T17:28:10.000Z | switch_model/hawaii/reserves.py | DesmondZhong/switch | 88abc164128b6a7345c7aa8806e2b37f74de54fa | ["ECL-2.0", "Apache-2.0"] | 117 | 2015-07-07T00:45:01.000Z | 2021-05-29T18:55:06.000Z | switch_model/hawaii/reserves.py | DesmondZhong/switch | 88abc164128b6a7345c7aa8806e2b37f74de54fa | ["ECL-2.0", "Apache-2.0"] | 70 | 2015-06-30T02:36:05.000Z | 2022-03-08T00:15:32.000Z |
"""
Defines types of reserve target and components that contribute to reserves,
and enforces the reserve targets.
"""
from __future__ import division
import os
from pyomo.environ import *
# TODO: use standard reserves module for this
def define_arguments(argparser):
argparser.add_argument('--reserves-from-storage', action='store_true', default=True,
help="Allow storage (batteries and hydrogen) to provide up- and down-reserves.")
argparser.add_argument('--no-reserves-from-storage', dest='reserves_from_storage',
action='store_false',
help="Don't allow storage (batteries and hydrogen) to provide up- and down-reserves.")
argparser.add_argument('--reserves-from-demand-response', action='store_true', default=True,
help="Allow demand response to provide up- and down-reserves.")
argparser.add_argument('--no-reserves-from-demand-response', dest='reserves_from_demand_response',
action='store_false',
help="Don't allow demand response to provide up- and down-reserves.")
def define_components(m):
"""
Note: In this simple model, we assume all reserves must be spinning. In more complex
models you could define products and portions of those products that must be spinning,
then use that to set the spinning reserve requirement.
Reserves don't have a deliverability requirement, so they are calculated for the whole region.
"""
# projects that can provide reserves
# TODO: add batteries, hydrogen and pumped storage to this
m.FIRM_GENS = Set(
initialize=m.GENERATION_PROJECTS,
#filter=lambda m, p: m.gen_energy_source[p] not in ['Wind', 'Solar']
)
m.FIRM_GEN_TPS = Set(
initialize=m.GEN_TPS,
filter=lambda m, p, tp: p in m.FIRM_GENS
)
m.CONTINGENCY_GENS = Set(
initialize=m.GENERATION_PROJECTS,
filter=lambda m, p: p in m.DISCRETELY_SIZED_GENS
)
m.CONTINGENCY_GEN_TPS = Set(
initialize=m.GEN_TPS,
filter=lambda m, p, tp: p in m.CONTINGENCY_GENS
)
# Calculate spinning reserve requirements.
# these parameters were found by regressing the reserve requirements from the GE RPS Study
# against wind and solar conditions each hour
# (see Dropbox/Research/Shared/Switch-Hawaii/ge_validation/source_data/reserve_requirements_oahu_scenarios charts.xlsx
# and Dropbox/Research/Shared/Switch-Hawaii/ge_validation/fit_renewable_reserves.ipynb )
# TODO: supply these parameters in input files
# regulating reserves required, as fraction of potential output (up to limit)
m.regulating_reserve_fraction = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={
'CentralTrackingPV': 1.0,
'DistPV': 1.0, # 0.81270193,
'OnshoreWind': 1.0,
'OffshoreWind': 1.0, # assumed equal to OnshoreWind
})
# maximum regulating reserves required, as fraction of installed capacity
m.regulating_reserve_limit = Param(['CentralTrackingPV', 'DistPV', 'OnshoreWind', 'OffshoreWind'], initialize={
'CentralTrackingPV': 0.21288916,
'DistPV': 0.21288916, # 0.14153171,
'OnshoreWind': 0.21624407,
'OffshoreWind': 0.21624407, # assumed equal to OnshoreWind
})
# more conservative values (found by giving 10x weight to times when we provide less reserves than GE):
# [1., 1., 1., 0.25760558, 0.18027923, 0.49123101]
m.RegulatingReserveRequirementMW = Expression(m.TIMEPOINTS, rule=lambda m, tp: sum(
m.GenCapacity[g, m.tp_period[tp]]
* min(
m.regulating_reserve_fraction[m.gen_tech[g]] * m.gen_max_capacity_factor[g, tp],
m.regulating_reserve_limit[m.gen_tech[g]]
)
for g in m.GENERATION_PROJECTS
if m.gen_tech[g] in m.regulating_reserve_fraction and (g, tp) in m.GEN_TPS
))
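# Illustrative reading of the expression above (numbers are examples only): each listed
# technology contributes GenCapacity * min(fraction * max_capacity_factor, limit), so
# 100 MW of DistPV at a 0.5 capacity factor adds min(1.0 * 0.5, 0.21288916) * 100,
# i.e. about 21.3 MW, to the regulating reserve requirement in that timepoint.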
def define_dynamic_components(m):
# these are defined late, so they can check whether various components have been defined by other modules
# TODO: create a central registry for components that contribute to reserves
# Calculate contingency reserve requirements
m.ContingencyReserveUpRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals)
# Apply a simple n-1 contingency reserve requirement;
# we treat each project as a separate contingency
# Note: we provide reserves for the full committed amount of each unit so that
# if any of the capacity is being used for regulating reserves, that will be backed
# up by contingency reserves.
# note: this uses a binary run/no-run flag, so it only provides one unit's worth of reserves
m.CommitGenFlag = Var(m.CONTINGENCY_GEN_TPS, within=Binary)
m.Set_CommitGenFlag = Constraint(
m.CONTINGENCY_GEN_TPS,
rule=lambda m, g, tp:
m.CommitGen[g, tp] <= m.CommitGenFlag[g, tp] * m.gen_capacity_limit_mw[g]
)
m.ContingencyReserveUpRequirement_Calculate = Constraint(
m.CONTINGENCY_GEN_TPS,
rule=lambda m, g, tp:
# m.ContingencyReserveUpRequirement[tp] >= m.CommitGen[g, tp]
m.ContingencyReserveUpRequirement[tp] >= m.CommitGenFlag[g, tp] * m.gen_unit_size[g]
)
m.ContingencyReserveDownRequirement = Var(m.TIMEPOINTS, within=NonNegativeReals)
# For now, we provide down reserves equal to 10% of all loads, including
# baseline load, demand response adjustment, electric vehicles, battery charging
# and hydrogen. It would be possible to split these into centralized and distributed
# loads and allocate separately for them (e.g., require that contingency reserves
# exceed 10% of total decentralized load and the size of the contingency for each
# centralized load; however, it's not obvious how to set the contingency for
# centralized loads, which are modular and may be divided between several locations.
# So we just assume we could lose 10% of all loads of any type, at any time.)
m.ContingencyReserveDownRequirement_Calculate = Constraint(
m.TIMEPOINTS,
rule=lambda m, tp:
m.ContingencyReserveDownRequirement[tp] >=
0.1 * sum(getattr(m, x)[z, tp] for x in m.Zone_Power_Withdrawals for z in m.LOAD_ZONES)
)
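# Quick sanity check of the 10% rule above (illustrative numbers, not from the model):
# if total zone withdrawals at a timepoint sum to 800 MW, the down-reserve
# requirement for that timepoint is at least 80 MW.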
# Calculate total spinning reserve requirements
m.SpinningReserveUpRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp:
m.RegulatingReserveRequirementMW[tp] + m.ContingencyReserveUpRequirement[tp]
)
m.SpinningReserveDownRequirement = Expression(m.TIMEPOINTS, rule=lambda m, tp:
m.ContingencyReserveDownRequirement[tp]
)
# Available reserves
def expr(m, tp):
STORAGE_GENS = getattr(m, 'STORAGE_GENS', [])
# all regular generators; omit storage because they'll be added separately if needed
avail = sum(
m.DispatchSlackUp[g, tp]
for g in m.FIRM_GENS
if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS
)
if m.options.reserves_from_storage:
# hawaii battery and hydrogen modules
if hasattr(m, 'BatterySlackUp'):
avail += sum(m.BatterySlackUp[z, tp] for z in m.LOAD_ZONES)
if hasattr(m, 'HydrogenSlackUp'):
avail += sum(m.HydrogenSlackUp[z, tp] for z in m.LOAD_ZONES)
# standard storage module (can stop charging and raise output to max)
avail += sum(
m.DispatchSlackUp[g, tp] + m.ChargeStorage[g, tp]
for g in STORAGE_GENS
if (g, tp) in m.GEN_TPS
)
if m.options.reserves_from_demand_response:
if hasattr(m, 'DemandUpReserves'):
avail += sum(m.DemandUpReserves[z, tp] for z in m.LOAD_ZONES)
if hasattr(m, 'ShiftDemand'):
avail += sum(m.ShiftDemand[z, tp] - m.ShiftDemand[z, tp].lb for z in m.LOAD_ZONES)
if hasattr(m, 'ChargeEVs') and hasattr(m.options, 'ev_timing') and m.options.ev_timing=='optimal':
avail += sum(m.ChargeEVs[z, tp] for z in m.LOAD_ZONES)
if hasattr(m, 'UnservedUpReserves'):
avail += m.UnservedUpReserves[tp]
# if tp == 2045012604:
# print "inspect avail to see up reserve calculation"
# import pdb; pdb.set_trace()
return avail
m.SpinningReservesUpAvailable = Expression(m.TIMEPOINTS, rule=expr)
def expr(m, tp):
STORAGE_GENS = getattr(m, 'STORAGE_GENS', [])
# all regular generators; omit storage because they'll be added separately if needed
avail = sum(
m.DispatchSlackDown[g, tp]
for g in m.FIRM_GENS
if (g, tp) in m.GEN_TPS and g not in STORAGE_GENS
)
if m.options.reserves_from_storage:
if hasattr(m, 'BatterySlackDown'):
avail += sum(m.BatterySlackDown[z, tp] for z in m.LOAD_ZONES)
if hasattr(m, 'HydrogenSlackDown'):
avail += sum(m.HydrogenSlackDown[z, tp] for z in m.LOAD_ZONES)
# standard storage module (can stop producing power and raise charging to max)
avail += sum(
m.DispatchSlackDown[g, tp]
+ m.DispatchUpperLimit[g, tp] * m.gen_store_to_release_ratio[g]
- m.ChargeStorage[g, tp]
for g in STORAGE_GENS
if (g, tp) in m.GEN_TPS
)
if m.options.reserves_from_demand_response:
if hasattr(m, 'DemandDownReserves'):
avail += sum(m.DemandDownReserves[z, tp] for z in m.LOAD_ZONES)
if hasattr(m, 'ShiftDemand'):
# avail += sum(m.ShiftDemand[z, tp].ub - m.ShiftDemand[z, tp] for z in m.LOAD_ZONES)
avail += sum(
24/3 * m.demand_response_max_share * m.zone_demand_mw[z, tp]
- m.ShiftDemand[z, tp]
for z in m.LOAD_ZONES
)
# note: we currently ignore down-reserves (option of increasing consumption)
# from EVs since it's not clear how high they could go; we could revisit this if
# down-reserves have a positive price at equilibrium (probably won't)
if hasattr(m, 'UnservedDownReserves'):
avail += m.UnservedDownReserves[tp]
return avail
m.SpinningReservesDownAvailable = Expression(m.TIMEPOINTS, rule=expr)
# Meet the reserve requirements (we use zero on RHS to enforce the right sign for the duals)
m.Satisfy_Spinning_Reserve_Up_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp:
m.SpinningReservesUpAvailable[tp] - m.SpinningReserveUpRequirement[tp] >= 0
)
m.Satisfy_Spinning_Reserve_Down_Requirement = Constraint(m.TIMEPOINTS, rule=lambda m, tp:
m.SpinningReservesDownAvailable[tp] - m.SpinningReserveDownRequirement[tp] >= 0
)
# NOTE: the shutdown constraints below are not used, because they conflict with
# the baseload status set in build_scenario_data.py. You should set the plant type
# to "Off" in "source_data/Hawaii RPS Study Generator Table OCR.xlsx" instead.
# # shutdown Kahe_6
# m.KAHE_6_TIMEPOINTS = Set(initialize=lambda m: m.TPS_FOR_GEN['Kahe_6'])
# m.ShutdownGenCapacity_Kahe_6 = Constraint(m.KAHE_6_TIMEPOINTS, rule=lambda m, tp:
# m.CommitGen['Kahe_6', tp] == 0
# )
# # shutdown Kahe_1 and Kahe_2
# m.SHUTDOWN_TIMEPOINTS = Set(dimen=2, initialize=lambda m: [
# (p, tp) for p in ['Kahe_1', 'Kahe_2'] for tp in m.TPS_FOR_GEN[p]
# ])
# m.ShutdownGenCapacity_Projects = Constraint(m.SHUTDOWN_TIMEPOINTS, rule=lambda m, p, tp:
# m.CommitGen[p, tp] == 0
# )
# Force cycling plants to be online 0700-2000 and offline at other times
# (based on inspection of Fig. 8)
# project reporting types are defined in save_custom_results.py
# Note: this assumes timepoints are evenly spaced, and timeseries begin at midnight
# m.CYCLING_PLANTS_TIMEPOINTS = Set(dimen=2, initialize=lambda m: [
# (g, tp) for g in m.REPORTING_TYPE_GENS['Cycling']
# for tp in m.TPS_FOR_GEN[g]
# ])
# m.Cycle_Plants = Constraint(m.CYCLING_PLANTS_TIMEPOINTS, rule=lambda m, g, tp:
# m.CommitSlackUp[g, tp] == 0
# if (7 <= ((m.TPS_IN_TS[m.tp_ts[tp]].ord(tp)-1) * m.tp_duration_hrs[tp]) % 24 <= 20)
# else m.CommitGen[g, tp] == 0
# )
# def show_it(m):
# print "CYCLING_PLANTS_TIMEPOINTS:"
# print list(m.CYCLING_PLANTS_TIMEPOINTS)
# m.ShowCyclingPlants = BuildAction(rule=show_it)
# def load_inputs(m, switch_data, inputs_dir):
# switch_data.load_aug(
# filename=os.path.join(inputs_dir, 'reserve_requirements.csv'),
# auto_select=True,
# param=(m.RegulatingReserveRequirementMW))
| 48.596958 | 122 | 0.663485 |
ad49c3ae5021e21a1ae0682a40dbcbbde21dfcfc | 1,893 | py | Python | venv/lib/python3.8/site-packages/troposphere/servicediscovery.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 2 | 2021-04-03T06:34:08.000Z | 2022-01-14T22:27:02.000Z | venv/lib/python3.8/site-packages/troposphere/servicediscovery.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 6 | 2020-09-05T01:40:23.000Z | 2022-03-12T00:40:58.000Z | venv/lib/python3.8/site-packages/troposphere/servicediscovery.py | ayfallen/urler | d7bb5c83018a75cb4af2bbb7178bcf364b61f68f | [
"MIT"
] | 1 | 2020-09-05T00:19:03.000Z | 2020-09-05T00:19:03.000Z | # Copyright (c) 2012-2017, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty
class Instance(AWSObject):
resource_type = "AWS::ServiceDiscovery::Instance"
props = {
'InstanceAttributes': (dict, True),
'InstanceId': (str, False),
'ServiceId': (str, True),
}
class PrivateDnsNamespace(AWSObject):
resource_type = "AWS::ServiceDiscovery::PrivateDnsNamespace"
props = {
'Description': (str, False),
'Name': (str, True),
'Vpc': (str, True),
}
class PublicDnsNamespace(AWSObject):
resource_type = "AWS::ServiceDiscovery::PublicDnsNamespace"
props = {
'Description': (str, False),
'Name': (str, True),
}
class HealthCheckConfig(AWSProperty):
props = {
'FailureThreshold': (float, False),
'ResourcePath': (str, False),
'Type': (str, True),
}
class HealthCheckCustomConfig(AWSProperty):
props = {
'FailureThreshold': (float, True)
}
class DnsRecord(AWSProperty):
props = {
'TTL': (str, True),
'Type': (str, True),
}
class DnsConfig(AWSProperty):
props = {
'DnsRecords': ([DnsRecord], True),
'NamespaceId': (str, False),
'RoutingPolicy': (str, False),
}
class Service(AWSObject):
resource_type = "AWS::ServiceDiscovery::Service"
props = {
'Description': (str, False),
'DnsConfig': (DnsConfig, False),
'HealthCheckConfig': (HealthCheckConfig, False),
'HealthCheckCustomConfig': (HealthCheckCustomConfig, False),
'Name': (str, False),
'NamespaceId': (str, False),
}
class HttpNamespace(AWSObject):
resource_type = "AWS::ServiceDiscovery::HttpNamespace"
props = {
'Description': (str, False),
'Name': (str, True),
}
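# Usage sketch (not part of the original module; resource names, IDs and values below
# are placeholders): these classes are plain troposphere resources, so a template is
# typically assembled elsewhere along the lines of
#
#   from troposphere import Ref, Template
#   from troposphere.servicediscovery import (
#       DnsConfig, DnsRecord, PrivateDnsNamespace, Service)
#
#   t = Template()
#   ns = t.add_resource(PrivateDnsNamespace(
#       "AppNamespace", Name="example.local", Vpc="vpc-12345678"))
#   t.add_resource(Service(
#       "AppService",
#       DnsConfig=DnsConfig(
#           DnsRecords=[DnsRecord(TTL="60", Type="A")],
#           NamespaceId=Ref(ns))))
#   print(t.to_json())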
| 21.758621 | 68 | 0.59588 |
eb7b4851e6bc7cc39367095ac1f88cfc59e70ed6 | 5,702 | py | Python | pages/views.py | ahsohel/My_art | d3bf86b753533b5cc0dbdfd95c39e5ce1a46630e | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | pages/views.py | ahsohel/My_art | d3bf86b753533b5cc0dbdfd95c39e5ce1a46630e | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | pages/views.py | ahsohel/My_art | d3bf86b753533b5cc0dbdfd95c39e5ce1a46630e | [
"MIT",
"PostgreSQL",
"Unlicense"
] | null | null | null | from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from .forms import ArtUploadForm, ArtworkDetail
from django.http import JsonResponse
from .models import Upload, ArtworkDetails as ArtworkDetailsModel
# Create your views here.
#########################
# DESIGN VIEWS
#########################
def designs_view(request):
return render(request, "pages/designs.html")
@login_required
def art_upload_view(request):
upload = Upload.objects.filter(user=request.user).last()
u_form = ArtUploadForm(instance=upload)
# print(u_form)
artwork_details = ArtworkDetailsModel.objects.filter(
user=request.user).last()
artwork_details_form = ArtworkDetail(instance=artwork_details)
context = {
'u_form': u_form,
'artwork_details_form': artwork_details_form,
'upload_images': Upload.objects.filter(user=request.user, is_uploaded=True),
"last_image": Upload.objects.filter(user=request.user).last(),
}
return render(request, "pages/design.html", context)
@login_required
def save_image(request):
if request.method == "POST":
if request.is_ajax():
title = request.POST.get('title', None)
image = request.FILES.get('image')
print(title, image)
# description = request.POST.get('up_description', None)
if image:
image = Upload.objects.create(
user=request.user, title=title, image=image)
image.save()
last_image = Upload.objects.filter(user=request.user).last()
return JsonResponse({'title': last_image.title, 'last_img_url': last_image.image.url})
else:
return JsonResponse({'status': False})
return JsonResponse({'status': True})
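# Illustrative only (the URL path is an assumption, not taken from the project's urls.py):
# the view above expects a multipart POST from an authenticated session carrying 'title'
# and 'image', plus the X-Requested-With header so that request.is_ajax() returns True,
# e.g. with the Django test client:
#   client.post('/pages/save-image/',
#               {'title': 'My art', 'image': open('art.png', 'rb')},
#               HTTP_X_REQUESTED_WITH='XMLHttpRequest')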
#
# @login_required
# def save_image_show_first(request):
# if request.method == "POST":
# if request.is_ajax():
# print('hello boss')
# print('hello boss')
# print('hello boss')
# print('hello boss')
#
# image = request.FILES.get('image')
# print(image)
# # description = request.POST.get('up_description', None)
# if image:
# image = Upload.objects.create(
# user=request.user, title=title, image=image)
# image.save()
# last_image = Upload.objects.filter(user=request.user).last()
# return JsonResponse({'title': last_image.title, 'last_img_url': last_image.image.url})
# else:
# return JsonResponse({'status': False})
# return JsonResponse({'status': True})
#
@login_required
def upload_at_work(request):
print('sohel i am here 2')
if request.method == "POST":
category = request.POST.get('category', None)
print('category')
print(category)
description = request.POST.get('description', None)
title = request.POST.get("title", None)
tag = request.POST.get("tag", None)
print(tag)
art_work = ArtworkDetailsModel.objects.filter(
user=request.user).first()
if art_work:
art_work.category = category
art_work.description = description
art_work.title = title
art_work.tag = tag
# print(art_work)
art_work.save()
return JsonResponse({'status': "updated artwork.."})
else:
art = ArtworkDetailsModel.objects.create(
user=request.user, title=title, tag=tag, category=category, description=description)
art.save()
return JsonResponse({'status': "created artwork.."})
# category: category,
# description: description,
# title: title,
# tag: tag,
# description: description,
# category: category
@login_required
def published_art_work(request):
if request.method == "POST":
is_uploaded = request.POST.get('is_uploaded', None)
# print(is_uploaded.title())
upload = Upload.objects.filter(user=request.user).last()
if upload and is_uploaded:
upload.is_uploaded = is_uploaded.title()
upload.save()
return redirect("design")
# upload = Upload.objects.filter(user=request.user).last()
# u_form = ArtUploadForm(instance=upload)
# artwork_details = ArtworkDetailsModel.objects.filter(
# user=request.user).last()
# artwork_details_form = ArtworkDetail(instance=artwork_details)
# context = {
# 'u_form': u_form,
# 'artwork_details_form': artwork_details_form,
# 'upload_images': Upload.objects.filter(user=request.user, is_uploaded=True),
# "last_image": Upload.objects.filter(user=request.user).last(),
# }
# return render(request, 'pages/ajax_reload_page.html', context)
return JsonResponse({'status': "uploaded successfully.."})
# def artwork_details_view(request):
# if request.method == 'POST':
# artwork_details_form = ArtworkDetails(request.POST, request.FILES, instance=request.user.artwork_details)
#
# if artwork_details_form.is_valid():
# artwork_details_form.save()
# messages.success(request, f'Your account has been updated!')
# return redirect('design')
# else:
# artwork_details_form = ArtworkDetails(instance=request.user.artwork_details)
#
# context = {
# 'artwork_details_form': artwork_details_form
# }
# return render(request, "pages/designs.html", context)
| 35.861635 | 115 | 0.627148 |
9f4c4c29d901762b8eee00a641ea509024dcdf9a | 4,648 | py | Python | tgs.py | Deeplayer/unet | c1eccd74becbba8bd12a79000dc2e232ea90a5c0 | [
"MIT"
] | null | null | null | tgs.py | Deeplayer/unet | c1eccd74becbba8bd12a79000dc2e232ea90a5c0 | [
"MIT"
] | null | null | null | tgs.py | Deeplayer/unet | c1eccd74becbba8bd12a79000dc2e232ea90a5c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os, cv2, sys, random, warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm, tqdm_notebook
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
import tensorflow as tf
from InceptionUnet import inception_resnet_v2_fpn
from metrics import dice_bce_loss, dice_coef, optimizer
# Set some parameters
IMG_WIDTH = 139
IMG_HEIGHT = 139
IMG_CHANNELS = 1
# TRAIN_IMG_PATH = 'F:/PythonProjects/deepcare/images'
# TRAIN_MASK_PATH = 'F:/PythonProjects/deepcare/masks'
# TEST_PATH = 'F:/PythonProjects/deepcare/images_test'
TRAIN_IMG_PATH = '../unet/images'
TRAIN_MASK_PATH = '../unet/masks'
TEST_PATH = '../unet/images_test'
train_ids = next(os.walk(TRAIN_IMG_PATH))[2]
test_ids = next(os.walk(TEST_PATH))[2]
print(len(train_ids), len(test_ids))
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.float32)
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
# prepare training set
for n, id in tqdm(enumerate(train_ids), total=len(train_ids)):
img = cv2.imread(TRAIN_IMG_PATH + '/' + id, 0)
img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH))
X_train[n] = np.expand_dims(img, axis=-1)
mask = cv2.imread(TRAIN_MASK_PATH + '/' + id, 0)
mask = cv2.resize(mask, (IMG_HEIGHT, IMG_WIDTH))
Y_train[n] = np.expand_dims(mask, axis=-1)/255.
# prepare test set
sizes_test = []
for n, id in tqdm(enumerate(test_ids), total=len(test_ids)):
img = cv2.imread(TEST_PATH + '/' + id, 0)
sizes_test.append([img.shape[0], img.shape[1]])
img = cv2.resize(img, (IMG_HEIGHT, IMG_WIDTH))
X_test[n] = np.expand_dims(img, axis=-1)
# normalize images
X_train /= 255.
X_train -= 0.5
X_train *= 2.
X_test /= 255.
X_test -= 0.5
X_test *= 2.
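# The three in-place steps above rescale pixel values from [0, 255] to [-1, 1]
# (divide by 255, shift by -0.5, scale by 2), the input range commonly used with
# Inception-style backbones.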
''' Fit model '''
model = inception_resnet_v2_fpn((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
#
model.compile(optimizer=optimizer('adam', lr=1e-4), loss=dice_bce_loss, metrics=[dice_coef])
#model.compile(optimizer=optimizer('adam', lr=1e-3), loss=dice_bce_loss, metrics=[dice_coef])
#
earlystopper = EarlyStopping(patience=20, verbose=1)
checkpointer = ModelCheckpoint('../unet/model-tgs-1.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.2, batch_size=16, epochs=40,
callbacks=[earlystopper, checkpointer], verbose=2)
# Predict on test
model = load_model('../unet/model-tgs-1.h5', custom_objects={'dice_coef': dice_coef})
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
# preds_train_t = (preds_train >= 0.5).astype(np.uint8)
# preds_val_t = (preds_val >= 0.5).astype(np.uint8)
preds_test_t = (preds_test >= 0.5).astype(np.uint8)
# Create list of upsampled test masks
preds_test_upsampled = []
for i in range(len(preds_test)):
    # note: cv2.resize expects dsize as (width, height), so swap the stored (height, width) pair
    preds_test_upsampled.append(cv2.resize(np.squeeze(preds_test[i]), (sizes_test[i][1], sizes_test[i][0])))
def RLenc(img, order='F', format=True):
"""
img is binary mask image, shape (r,c)
order is down-then-right, i.e. Fortran
format determines if the order needs to be preformatted (according to submission rules) or not
returns run length as an array or string (if format is True)
"""
bytes = img.reshape(img.shape[0] * img.shape[1], order=order)
runs = [] ## list of run lengths
r = 0 ## the current run length
pos = 1 ## count starts from 1 per WK
for c in bytes:
if (c == 0):
if r != 0:
runs.append((pos, r))
pos += r
r = 0
pos += 1
else:
r += 1
# if last run is unsaved (i.e. data ends with 1)
if r != 0:
runs.append((pos, r))
pos += r
r = 0
if format:
z = ''
for rr in runs:
z += '{} {} '.format(rr[0], rr[1])
return z[:-1]
else:
return runs
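# Worked example (added for clarity): a 2x2 mask [[0, 1], [1, 0]] flattened in
# column-major ('F') order gives [0, 1, 1, 0]; with 1-based positions the single
# run of ones starts at position 2 and has length 2, so RLenc returns '2 2'.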
pred_dict = {fn[:-4]:RLenc(np.round(preds_test_upsampled[i])) for i,fn in tqdm_notebook(enumerate(test_ids))}
sub = pd.DataFrame.from_dict(pred_dict, orient='index')
sub.index.names = ['id']
sub.columns = ['rle_mask']
sub.to_csv('../unet/submission.csv')
| 32.503497 | 121 | 0.656627 |
757fd6ad47e84b1001435cccfaf4bbb14dc717d9 | 32,410 | py | Python | stomp/transport.py | Anthchirp/stomp.py | c7642e4f9a0b50ef1a245427506539f0c4888cbb | [
"Apache-2.0"
] | null | null | null | stomp/transport.py | Anthchirp/stomp.py | c7642e4f9a0b50ef1a245427506539f0c4888cbb | [
"Apache-2.0"
] | null | null | null | stomp/transport.py | Anthchirp/stomp.py | c7642e4f9a0b50ef1a245427506539f0c4888cbb | [
"Apache-2.0"
] | 1 | 2020-03-22T12:55:31.000Z | 2020-03-22T12:55:31.000Z | """Provides the underlying transport functionality (for stomp message transmission) - (mostly) independent from the actual STOMP protocol
"""
import errno
from io import BytesIO
import logging
import math
import random
import re
import socket
import sys
import threading
import time
import warnings
try:
import ssl
from ssl import SSLError
DEFAULT_SSL_VERSION = ssl.PROTOCOL_TLSv1
except (ImportError, AttributeError): # python version < 2.6 without the backported ssl module
ssl = None
class SSLError(object):
pass
DEFAULT_SSL_VERSION = None
try:
from socket import SOL_SOCKET, SO_KEEPALIVE
from socket import SOL_TCP, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
LINUX_KEEPALIVE_AVAIL = True
except ImportError:
LINUX_KEEPALIVE_AVAIL = False
from stomp.backward import decode, encode, get_errno, monotonic, pack
from stomp.backwardsock import get_socket
import stomp.exception as exception
import stomp.listener
import stomp.utils as utils
log = logging.getLogger('stomp.py')
class BaseTransport(stomp.listener.Publisher):
"""
Base class for transport classes providing support for listeners, threading overrides,
and anything else outside of actually establishing a network connection, sending and
receiving of messages (so generally socket-agnostic functions).
:param bool wait_on_receipt: deprecated, ignored
:param bool auto_decode: automatically decode message responses as strings, rather than
leaving them as bytes. This preserves the behaviour as of version 4.0.16.
(To be defaulted to False as of the next release)
"""
#
# Used to parse the STOMP "content-length" header lines,
#
__content_length_re = re.compile(b'^content-length[:]\\s*(?P<value>[0-9]+)', re.MULTILINE)
def __init__(self, wait_on_receipt=False, auto_decode=True):
self.__recvbuf = b''
self.listeners = {}
self.running = False
self.blocking = None
self.connected = False
self.connection_error = False
self.__receipts = {}
self.current_host_and_port = None
# flag used when we receive the disconnect receipt
self.__disconnect_receipt = None
# function for creating threads used by the connection
self.create_thread_fc = utils.default_create_thread
self.__receiver_thread_exit_condition = threading.Condition()
self.__receiver_thread_exited = False
self.__send_wait_condition = threading.Condition()
self.__connect_wait_condition = threading.Condition()
self.__auto_decode = auto_decode
def override_threading(self, create_thread_fc):
"""
Override for thread creation. Use an alternate threading library by
setting this to a function with a single argument (which is the receiver loop callback).
The thread which is returned should be started (ready to run)
:param function create_thread_fc: single argument function for creating a thread
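Example (illustrative; ``transport`` stands for any transport instance)::

    import threading

    def create_thread(callback):
        thread = threading.Thread(target=callback)
        thread.daemon = True
        thread.start()  # must already be running when returned
        return thread

    transport.override_threading(create_thread)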
"""
self.create_thread_fc = create_thread_fc
#
# Manage the connection
#
def start(self):
"""
Start the connection. This should be called after all
listeners have been registered. If this method is not called,
no frames will be received by the connection.
"""
self.running = True
self.attempt_connection()
receiver_thread = self.create_thread_fc(self.__receiver_loop)
receiver_thread.name = "StompReceiver%s" % getattr(receiver_thread, "name", "Thread")
self.notify('connecting')
def stop(self):
"""
Stop the connection. Performs a clean shutdown by waiting for the
receiver thread to exit.
"""
with self.__receiver_thread_exit_condition:
while not self.__receiver_thread_exited:
self.__receiver_thread_exit_condition.wait()
def is_connected(self):
"""
:rtype: bool
"""
return self.connected
def set_connected(self, connected):
"""
:param bool connected:
"""
with self.__connect_wait_condition:
self.connected = connected
if connected:
self.__connect_wait_condition.notify()
#
# Manage objects listening to incoming frames
#
def set_listener(self, name, listener):
"""
Set a named listener to use with this connection.
See :py:class:`stomp.listener.ConnectionListener`
:param str name: the name of the listener
:param ConnectionListener listener: the listener object
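Example (listener class and name are illustrative)::

    class PrintingListener(stomp.listener.ConnectionListener):
        def on_message(self, headers, body):
            print(body)

    transport.set_listener('printer', PrintingListener())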
"""
self.listeners[name] = listener
def remove_listener(self, name):
"""
Remove a listener according to the specified name
:param str name: the name of the listener to remove
"""
del self.listeners[name]
def get_listener(self, name):
"""
Return the named listener
:param str name: the listener to return
:rtype: ConnectionListener
"""
return self.listeners.get(name)
def process_frame(self, f, frame_str):
"""
:param Frame f: Frame object
:param bytes frame_str: raw frame content
"""
frame_type = f.cmd.lower()
if frame_type in ['connected', 'message', 'receipt', 'error', 'heartbeat']:
if frame_type == 'message':
(f.headers, f.body) = self.notify('before_message', f.headers, f.body)
if log.isEnabledFor(logging.DEBUG):
log.debug("Received frame: %r, headers=%r, body=%r", f.cmd, f.headers, f.body)
else:
log.info("Received frame: %r, headers=%r, len(body)=%r", f.cmd, f.headers, utils.length(f.body))
self.notify(frame_type, f.headers, f.body)
else:
log.warning("Unknown response frame type: '%s' (frame length was %d)", frame_type, utils.length(frame_str))
def notify(self, frame_type, headers=None, body=None):
"""
Utility function for notifying listeners of incoming and outgoing messages
:param str frame_type: the type of message
:param dict headers: the map of headers associated with the message
:param body: the content of the message
"""
if frame_type == 'receipt':
# logic for wait-on-receipt notification
receipt = headers['receipt-id']
with self.__send_wait_condition:
self.__receipts[receipt] = None
self.__send_wait_condition.notify()
# received a stomp 1.1+ disconnect receipt
if receipt == self.__disconnect_receipt:
self.disconnect_socket()
elif frame_type == 'connected':
self.set_connected(True)
elif frame_type == 'disconnected':
self.set_connected(False)
for listener in self.listeners.values():
if not listener:
continue
notify_func = getattr(listener, 'on_%s' % frame_type, None)
if not notify_func:
log.debug("listener %s has no method on_%s", listener, frame_type)
continue
if frame_type in ('heartbeat', 'disconnected'):
notify_func()
continue
if frame_type == 'connecting':
notify_func(self.current_host_and_port)
continue
if frame_type == 'error' and not self.connected:
with self.__connect_wait_condition:
self.connection_error = True
self.__connect_wait_condition.notify()
rtn = notify_func(headers, body)
if rtn:
(headers, body) = rtn
return (headers, body)
def transmit(self, frame):
"""
Convert a frame object to a frame string and transmit to the server.
:param Frame frame: the Frame object to transmit
"""
for listener in self.listeners.values():
if not listener:
continue
try:
listener.on_send(frame)
except AttributeError:
continue
lines = utils.convert_frame_to_lines(frame)
packed_frame = pack(lines)
if log.isEnabledFor(logging.DEBUG):
log.debug("Sending frame: %s", lines)
else:
log.info("Sending frame: %r, headers=%r", frame.cmd or "heartbeat", frame.headers)
self.send(encode(packed_frame))
def send(self, encoded_frame):
"""
Send an encoded frame over this transport (to be implemented in subclasses)
:param bytes encoded_frame: a Frame object which has been encoded for transmission
"""
pass
def receive(self):
"""
Receive a chunk of data (to be implemented in subclasses)
:rtype: bytes
"""
pass
def cleanup(self):
"""
Cleanup the transport (to be implemented in subclasses)
"""
pass
def attempt_connection(self):
"""
Attempt to establish a connection.
"""
pass
def disconnect_socket(self):
"""
Disconnect the socket.
"""
def wait_for_connection(self, timeout=None):
"""
Wait until we've established a connection with the server.
:param float timeout: how long to wait, in seconds
"""
if timeout is not None:
wait_time = timeout / 10.0
else:
wait_time = None
with self.__connect_wait_condition:
while not self.is_connected() and not self.connection_error:
self.__connect_wait_condition.wait(wait_time)
def __receiver_loop(self):
"""
Main loop listening for incoming data.
"""
log.info("Starting receiver loop")
try:
while self.running:
try:
while self.running:
frames = self.__read()
for frame in frames:
f = utils.parse_frame(frame)
if self.__auto_decode:
f.body = decode(f.body)
self.process_frame(f, frame)
except exception.ConnectionClosedException:
if self.running:
self.notify('disconnected')
#
# Clear out any half-received messages after losing connection
#
self.__recvbuf = b''
self.running = False
break
finally:
self.cleanup()
finally:
with self.__receiver_thread_exit_condition:
self.__receiver_thread_exited = True
self.__receiver_thread_exit_condition.notifyAll()
log.info("Receiver loop ended")
def __read(self):
"""
Read the next frame(s) from the socket.
:return: list of frames read
:rtype: list(bytes)
"""
fastbuf = BytesIO()
while self.running:
try:
try:
c = self.receive()
except exception.InterruptedException:
log.debug("socket read interrupted, restarting")
continue
except Exception:
log.debug("socket read error", exc_info=True)
c = b''
if len(c) == 0:
raise exception.ConnectionClosedException()
if c == b'\x0a' and not self.__recvbuf and not fastbuf.tell():
#
# EOL to an empty receive buffer: treat as heartbeat.
# Note that this may misdetect an optional EOL at end of frame as heartbeat in case the
# previous receive() got a complete frame whose NUL at end of frame happened to be the
# last byte of that read. But that should be harmless in practice.
#
fastbuf.close()
return [c]
fastbuf.write(c)
if b'\x00' in c:
#
# Possible end of frame
#
break
self.__recvbuf += fastbuf.getvalue()
fastbuf.close()
result = []
if self.__recvbuf and self.running:
while True:
pos = self.__recvbuf.find(b'\x00')
if pos >= 0:
frame = self.__recvbuf[0:pos]
preamble_end_match = utils.PREAMBLE_END_RE.search(frame)
if preamble_end_match:
preamble_end = preamble_end_match.start()
content_length_match = BaseTransport.__content_length_re.search(frame[0:preamble_end])
if content_length_match:
content_length = int(content_length_match.group('value'))
content_offset = preamble_end_match.end()
frame_size = content_offset + content_length
if frame_size > len(frame):
#
# Frame contains NUL bytes, need to read more
#
if frame_size < len(self.__recvbuf):
pos = frame_size
frame = self.__recvbuf[0:pos]
else:
#
# Haven't read enough data yet, exit loop and wait for more to arrive
#
break
result.append(frame)
pos += 1
#
# Ignore optional EOLs at end of frame
#
while self.__recvbuf[pos:pos + 1] == b'\x0a':
pos += 1
self.__recvbuf = self.__recvbuf[pos:]
else:
break
return result
class Transport(BaseTransport):
"""
Represents a STOMP client 'transport'. Effectively this is the communications mechanism without the definition of
the protocol.
:param list((str,int)) host_and_ports: a list of (host, port) tuples
:param bool prefer_localhost: if True and the local host is mentioned in the (host,
port) tuples, try to connect to this first
:param bool try_loopback_connect: if True and the local host is found in the host
tuples, try connecting to it using loopback interface
(127.0.0.1)
:param float reconnect_sleep_initial: initial delay in seconds to wait before reattempting
to establish a connection if connection to any of the
hosts fails.
:param float reconnect_sleep_increase: factor by which the sleep delay is increased after
each connection attempt. For example, 0.5 means
to wait 50% longer than before the previous attempt,
1.0 means wait twice as long, and 0.0 means keep
the delay constant.
:param float reconnect_sleep_max: maximum delay between connection attempts, regardless
of the reconnect_sleep_increase.
:param float reconnect_sleep_jitter: random additional time to wait (as a percentage of
the time determined using the previous parameters)
between connection attempts in order to avoid
stampeding. For example, a value of 0.1 means to wait
an extra 0%-10% (randomly determined) of the delay
calculated using the previous three parameters.
:param int reconnect_attempts_max: maximum attempts to reconnect
:param bool use_ssl: deprecated, see :py:meth:`set_ssl`
:param ssl_cert_file: deprecated, see :py:meth:`set_ssl`
:param ssl_key_file: deprecated, see :py:meth:`set_ssl`
:param ssl_ca_certs: deprecated, see :py:meth:`set_ssl`
:param ssl_cert_validator: deprecated, see :py:meth:`set_ssl`
:param ssl_version: deprecated, see :py:meth:`set_ssl`
:param timeout: the timeout value to use when connecting the stomp socket
:param bool wait_on_receipt: deprecated, ignored
:param keepalive: some operating systems support sending the occasional heart
beat packets to detect when a connection fails. This
parameter can either be set set to a boolean to turn on the
default keepalive options for your OS, or as a tuple of
values, which also enables keepalive packets, but specifies
options specific to your OS implementation
:param str vhost: specify a virtual hostname to provide in the 'host' header of the connection
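Example (host, port and keepalive values are illustrative only)::

    transport = Transport(host_and_ports=[('broker.example.org', 61613)],
                          keepalive=('linux', 120, 10, 6),
                          vhost='broker.example.org')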
"""
def __init__(self,
host_and_ports=None,
prefer_localhost=True,
try_loopback_connect=True,
reconnect_sleep_initial=0.1,
reconnect_sleep_increase=0.5,
reconnect_sleep_jitter=0.1,
reconnect_sleep_max=60.0,
reconnect_attempts_max=3,
use_ssl=False,
ssl_key_file=None,
ssl_cert_file=None,
ssl_ca_certs=None,
ssl_cert_validator=None,
wait_on_receipt=False,
ssl_version=None,
timeout=None,
keepalive=None,
vhost=None,
auto_decode=True
):
BaseTransport.__init__(self, wait_on_receipt, auto_decode)
if host_and_ports is None:
host_and_ports = [('localhost', 61613)]
sorted_host_and_ports = []
sorted_host_and_ports.extend(host_and_ports)
#
# If localhost is preferred, make sure all (host, port) tuples that refer to the local host come first in
# the list
#
if prefer_localhost:
sorted_host_and_ports.sort(key=utils.is_localhost)
#
# If the user wishes to attempt connecting to local ports using the loopback interface, for each (host, port)
# tuple referring to a local host, add an entry with the host name replaced by 127.0.0.1 if it doesn't
# exist already
#
loopback_host_and_ports = []
if try_loopback_connect:
for host_and_port in sorted_host_and_ports:
if utils.is_localhost(host_and_port) == 1:
port = host_and_port[1]
if not (("127.0.0.1", port) in sorted_host_and_ports or ("localhost", port) in sorted_host_and_ports):
loopback_host_and_ports.append(("127.0.0.1", port))
#
# Assemble the final, possibly sorted list of (host, port) tuples
#
self.__host_and_ports = []
self.__host_and_ports.extend(loopback_host_and_ports)
self.__host_and_ports.extend(sorted_host_and_ports)
self.__reconnect_sleep_initial = reconnect_sleep_initial
self.__reconnect_sleep_increase = reconnect_sleep_increase
self.__reconnect_sleep_jitter = reconnect_sleep_jitter
self.__reconnect_sleep_max = reconnect_sleep_max
self.__reconnect_attempts_max = reconnect_attempts_max
self.__timeout = timeout
self.socket = None
self.__socket_semaphore = threading.BoundedSemaphore(1)
self.current_host_and_port = None
# setup SSL
self.__ssl_params = {}
if use_ssl:
warnings.warn("Deprecated: use set_ssl instead", DeprecationWarning)
self.set_ssl(host_and_ports,
ssl_key_file,
ssl_cert_file,
ssl_ca_certs,
ssl_cert_validator,
ssl_version)
self.__keepalive = keepalive
self.vhost = vhost
def is_connected(self):
"""
Return true if the socket managed by this connection is connected
:rtype: bool
"""
try:
return self.socket is not None and self.socket.getsockname()[1] != 0 and BaseTransport.is_connected(self)
except socket.error:
return False
def disconnect_socket(self):
"""
Disconnect the underlying socket connection
"""
self.running = False
if self.socket is not None:
if self.__need_ssl():
#
# Even though we don't want to use the socket, unwrap is the only API method which does a proper SSL
# shutdown
#
try:
self.socket = self.socket.unwrap()
except Exception:
#
# unwrap seems flaky on Win with the back-ported ssl mod, so catch any exception and log it
#
_, e, _ = sys.exc_info()
log.warning(e)
elif hasattr(socket, 'SHUT_RDWR'):
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
_, e, _ = sys.exc_info()
# ignore when socket already closed
if get_errno(e) != errno.ENOTCONN:
log.warning("Unable to issue SHUT_RDWR on socket because of error '%s'", e)
#
# split this into a separate check, because sometimes the socket is nulled between shutdown and this call
#
if self.socket is not None:
try:
self.socket.close()
except socket.error:
_, e, _ = sys.exc_info()
log.warning("Unable to close socket because of error '%s'", e)
self.current_host_and_port = None
def send(self, encoded_frame):
"""
:param bytes encoded_frame:
"""
if self.socket is not None:
try:
with self.__socket_semaphore:
self.socket.sendall(encoded_frame)
except Exception:
_, e, _ = sys.exc_info()
log.error("Error sending frame", exc_info=1)
raise e
else:
raise exception.NotConnectedException()
def receive(self):
"""
:rtype: bytes
"""
try:
return self.socket.recv(1024)
except socket.error:
_, e, _ = sys.exc_info()
if get_errno(e) in (errno.EAGAIN, errno.EINTR):
log.debug("socket read interrupted, restarting")
raise exception.InterruptedException()
raise
def cleanup(self):
"""
Close the socket and clear the current host and port details.
"""
try:
self.socket.close()
except:
pass # ignore errors when attempting to close socket
self.socket = None
self.current_host_and_port = None
def __enable_keepalive(self):
def try_setsockopt(sock, name, fam, opt, val):
if val is None:
return True # no value to set always works
try:
sock.setsockopt(fam, opt, val)
log.info("keepalive: set %r option to %r on socket", name, val)
except:
log.error("keepalive: unable to set %r option to %r on socket", name, val)
return False
return True
ka = self.__keepalive
if not ka:
return
if ka is True:
ka_sig = 'auto'
ka_args = ()
else:
try:
ka_sig = ka[0]
ka_args = ka[1:]
except Exception:
log.error("keepalive: bad specification %r", ka)
return
if ka_sig == 'auto':
if LINUX_KEEPALIVE_AVAIL:
ka_sig = 'linux'
ka_args = None
log.info("keepalive: autodetected linux-style support")
else:
log.error("keepalive: unable to detect any implementation, DISABLED!")
return
if ka_sig == 'linux':
log.info("keepalive: activating linux-style support")
if ka_args is None:
log.info("keepalive: using system defaults")
ka_args = (None, None, None)
lka_idle, lka_intvl, lka_cnt = ka_args
if try_setsockopt(self.socket, 'enable', SOL_SOCKET, SO_KEEPALIVE, 1):
try_setsockopt(self.socket, 'idle time', SOL_TCP, TCP_KEEPIDLE, lka_idle)
try_setsockopt(self.socket, 'interval', SOL_TCP, TCP_KEEPINTVL, lka_intvl)
try_setsockopt(self.socket, 'count', SOL_TCP, TCP_KEEPCNT, lka_cnt)
else:
log.error("keepalive: implementation %r not recognized or not supported", ka_sig)
def attempt_connection(self):
"""
Try connecting to the (host, port) tuples specified at construction time.
"""
self.connection_error = False
sleep_exp = 1
connect_count = 0
while self.running and self.socket is None and connect_count < self.__reconnect_attempts_max:
for host_and_port in self.__host_and_ports:
try:
log.info("Attempting connection to host %s, port %s", host_and_port[0], host_and_port[1])
self.socket = get_socket(host_and_port[0], host_and_port[1], self.__timeout)
self.__enable_keepalive()
need_ssl = self.__need_ssl(host_and_port)
if need_ssl: # wrap socket
ssl_params = self.get_ssl(host_and_port)
if ssl_params['ca_certs']:
cert_validation = ssl.CERT_REQUIRED
else:
cert_validation = ssl.CERT_NONE
try:
tls_context = ssl.create_default_context(cafile=ssl_params['ca_certs'])
except AttributeError:
tls_context = None
if tls_context:
# Wrap the socket for TLS
certfile = ssl_params['cert_file']
keyfile = ssl_params['key_file']
if certfile and not keyfile:
keyfile = certfile
if certfile:
tls_context.load_cert_chain(certfile, keyfile)
if cert_validation is None or cert_validation == ssl.CERT_NONE:
tls_context.check_hostname = False
tls_context.verify_mode = cert_validation
self.socket = tls_context.wrap_socket(self.socket, server_hostname=host_and_port[0])
else:
# Old-style wrap_socket where we don't have a modern SSLContext (so no SNI)
self.socket = ssl.wrap_socket(
self.socket,
keyfile=ssl_params['key_file'],
certfile=ssl_params['cert_file'],
cert_reqs=cert_validation,
ca_certs=ssl_params['ca_certs'],
ssl_version=ssl_params['ssl_version'])
self.socket.settimeout(self.__timeout)
if self.blocking is not None:
self.socket.setblocking(self.blocking)
#
# Validate server cert
#
if need_ssl and ssl_params['cert_validator']:
cert = self.socket.getpeercert()
(ok, errmsg) = ssl_params['cert_validator'](cert, host_and_port[0])
if not ok:
raise SSLError("Server certificate validation failed: %s" % errmsg)
self.current_host_and_port = host_and_port
log.info("Established connection to host %s, port %s", host_and_port[0], host_and_port[1])
break
except socket.error:
self.socket = None
connect_count += 1
log.warning("Could not connect to host %s, port %s", host_and_port[0], host_and_port[1], exc_info=1)
if self.socket is None:
sleep_duration = (min(self.__reconnect_sleep_max,
((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))
* math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))
* (1.0 + random.random() * self.__reconnect_sleep_jitter))
sleep_end = monotonic() + sleep_duration
log.debug("Sleeping for %.1f seconds before attempting reconnect", sleep_duration)
while self.running and monotonic() < sleep_end:
time.sleep(0.2)
if sleep_duration < self.__reconnect_sleep_max:
sleep_exp += 1
if not self.socket:
raise exception.ConnectFailedException()
def set_ssl(self,
for_hosts=(),
key_file=None,
cert_file=None,
ca_certs=None,
cert_validator=None,
ssl_version=DEFAULT_SSL_VERSION):
"""
Sets up SSL configuration for the given hosts. This ensures socket is wrapped in a SSL connection, raising an
exception if the SSL module can't be found.
:param for_hosts: hosts this SSL configuration should be applied to
:param cert_file: the path to a X509 certificate
:param key_file: the path to a X509 key file
:param ca_certs: the path to the a file containing CA certificates to validate the server against.
If this is not set, server side certificate validation is not done.
:param cert_validator: function which performs extra validation on the client certificate, for example
checking the returned certificate has a commonName attribute equal to the
hostname (to avoid man in the middle attacks).
The signature is: (OK, err_msg) = validation_function(cert, hostname)
where OK is a boolean, and cert is a certificate structure
as returned by ssl.SSLSocket.getpeercert()
:param ssl_version: SSL protocol to use for the connection. This should be one of the PROTOCOL_x
constants provided by the ssl module. The default is ssl.PROTOCOL_TLSv1
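Example (paths, host and validator are illustrative only)::

    def check_common_name(cert, hostname):
        subject = dict(entry[0] for entry in cert.get('subject', ()))
        ok = subject.get('commonName') == hostname
        return (ok, 'commonName does not match %s' % hostname)

    transport.set_ssl(for_hosts=[('broker.example.org', 61614)],
                      ca_certs='/etc/ssl/certs/broker-ca.pem',
                      cert_validator=check_common_name)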
"""
if not ssl:
raise Exception("SSL connection requested, but SSL library not found")
for host_port in for_hosts:
self.__ssl_params[host_port] = dict(key_file=key_file,
cert_file=cert_file,
ca_certs=ca_certs,
cert_validator=cert_validator,
ssl_version=ssl_version)
def __need_ssl(self, host_and_port=None):
"""
Whether current host needs SSL or not.
:param (str,int) host_and_port: the host/port pair to check, default current_host_and_port
"""
if not host_and_port:
host_and_port = self.current_host_and_port
return host_and_port in self.__ssl_params
def get_ssl(self, host_and_port=None):
"""
Get SSL params for the given host.
:param (str,int) host_and_port: the host/port pair we want SSL params for, default current_host_and_port
"""
if not host_and_port:
host_and_port = self.current_host_and_port
return self.__ssl_params.get(host_and_port)
| 39.284848 | 137 | 0.56535 |
0ad8962b65b67ab35539ee7c23e143cf89cb0428 | 83,666 | py | Python | pysnmp-with-texts/Wellfleet-IPX-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Wellfleet-IPX-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Wellfleet-IPX-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Wellfleet-IPX-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Wellfleet-IPX-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:40:39 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
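# Usage sketch (not part of the generated output; path handling is an assumption):
# once this file is on the pysnmp MIB search path, it can be loaded with, e.g.
#   from pysnmp.smi import builder
#   mib_builder = builder.MibBuilder()
#   mib_builder.loadModules('Wellfleet-IPX-MIB')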
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, ObjectIdentity, ModuleIdentity, iso, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Unsigned32, Counter64, IpAddress, MibIdentifier, Integer32, Opaque, NotificationType, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "ObjectIdentity", "ModuleIdentity", "iso", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Unsigned32", "Counter64", "IpAddress", "MibIdentifier", "Integer32", "Opaque", "NotificationType", "Gauge32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
wfIpxGroup, = mibBuilder.importSymbols("Wellfleet-COMMON-MIB", "wfIpxGroup")
wfIpxBase = MibIdentifier((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1))
wfIpxBaseDelete = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete IPX.')
wfIpxBaseDisable = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable IPX.')
wfIpxBaseState = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("in", 3), ("notpresent", 4))).clone('notpresent')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseState.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseState.setDescription('The current state of the entire IPX.')
wfIpxBaseCfgHostNumber = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseCfgHostNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseCfgHostNumber.setDescription('Host Number of Router and potential MAC address of box. This host number will be used to overide the default box generated hostnumber.')
wfIpxBaseActiveHostNumber = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseActiveHostNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseActiveHostNumber.setDescription('Host Number of Router and potential MAC address of box. This is not a restart variable because this will be filled in from the IPX code after reading it from either the configurable host number or from a box generated variable.')
wfIpxBaseNetCount = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseNetCount.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseNetCount.setDescription('Count the total number of nets in the box.')
wfIpxBaseServiceCount = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseServiceCount.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseServiceCount.setDescription('Count the total number of saps in the box.')
wfIpxBaseLogFilter = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 16, 3, 17, 18, 19))).clone(namedValues=NamedValues(("debug", 1), ("info", 2), ("trace", 16), ("debuginfo", 3), ("debugtrace", 17), ("infotrace", 18), ("debuginfotrace", 19))).clone('trace')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseLogFilter.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseLogFilter.setDescription('Filter out some log messages, Default filters out debugs, info, and trace messages.')
wfIpxBaseNetTblSize = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseNetTblSize.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseNetTblSize.setDescription('Tell me how much space to set aside when creating the forwarding and network tables.')
wfIpxBaseRouterName = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 10), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseRouterName.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseRouterName.setDescription('The Name of the Router, used for IPXWAN')
wfIpxBasePrimaryNetNumber = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 11), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBasePrimaryNetNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBasePrimaryNetNumber.setDescription("The 'interfernal' network number, used for IPXWAN")
wfIpxBaseRipMethod = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("metric", 1), ("tick", 2))).clone('tick')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseRipMethod.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseRipMethod.setDescription('Choose either Metric/Hops based or Tick based routing')
wfIpxBaseMaximumPath = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseMaximumPath.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseMaximumPath.setDescription('Declare Maximum number equal costs paths allowed for a given destination network')
wfIpxBaseHostCount = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseHostCount.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseHostCount.setDescription('Count the total number of hosts in the box.')
wfIpxBaseMultipleHostAddrs = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseMultipleHostAddrs.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseMultipleHostAddrs.setDescription("Allow the user to configure one host/mac addr for the whole box (default) where, if configured, the CfgHostNumber is used (becomes ActiveHostNubmer) otherwise if left unconfigured the ActiveHostNumber is discerned from the backplane serial number. If Multiple host is Enabled and the host number is configured in the interface record the configured value is used as the hostid/mac addr of the interface (except Token Ring which can't go into promiscous mode, uses Token Ring Mac as hostid/mac of interface). If the host number in left unconfigured then the hostid/mac is gotten from the mac of the circuit.")
wfIpxBaseNovellCertificationConformance = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseNovellCertificationConformance.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseNovellCertificationConformance.setDescription('enable/disable novell certification, disable/enable wellfleet specifics')
wfIpxBaseTrigUpdateEn = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseTrigUpdateEn.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseTrigUpdateEn.setDescription('enable/disable triggered rip updates for Goldman Sax')
wfIpxBaseNetSizeBoundEn = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseNetSizeBoundEn.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseNetSizeBoundEn.setDescription('enable/disable maximum allowed size of the network table')
wfIpxBaseMaxNetTblSize = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 19), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseMaxNetTblSize.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseMaxNetTblSize.setDescription('maximum allowed size of the network table')
wfIpxBaseNetTblFillNotify = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100)).clone(100)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxBaseNetTblFillNotify.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxBaseNetTblFillNotify.setDescription('notify when (configured) percentage is reached')
wfIpxBaseRtEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2), )
if mibBuilder.loadTexts: wfIpxBaseRtEntryTable.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRtEntryTable.setDescription("The table of Elements in IPX's routing table")
wfIpxBaseRtEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxBaseRouteDest"))
if mibBuilder.loadTexts: wfIpxBaseRtEntry.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRtEntry.setDescription('An entry in the Routing Table')
wfIpxBaseRouteDest = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteDest.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteDest.setDescription('Multiple routes to a single destination can appear in the table, but access to such multiple entries is dependent on the table- access mechanisms defined by the network management protocol in use.')
wfIpxBaseRouteIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteIfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteIfIndex.setDescription('The index value which uniquely identifies the local interface through which the next hop of this route should be reached. The interface identified by a particular value of this index is the same interface as identified by the same value of ifIndex.')
wfIpxBaseRouteMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteMetric.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteMetric.setDescription("The primary routing metric for this route. The semantics of this metric are determined by the routing-protocol specified in the route's ipRouteProto value. If this metric is not used, its value should be set to -1.")
wfIpxBaseRouteNextHopNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteNextHopNetwork.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteNextHopNetwork.setDescription("In the case of a route bound to an interface which is realized via a broadcast media, the value of this field is the agent's IPX address on that interface.")
wfIpxBaseRouteNextHopHost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteNextHopHost.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteNextHopHost.setDescription('Full network.host_id address of nexthop host for this network.')
wfIpxBaseRouteType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("direct", 3), ("indirect", 4), ("static", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteType.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteType.setDescription('The type of route. Note that the values direct(3) and indirect(4) refer to the notion of direct and indirect routing in the IPX architecture. Setting this object to the value invalid(2) has the effect of invalidating the corresponding entry in the ipRouteTable object. That is, it effectively disassociates the destination identified with said entry from the route identified with said entry. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant ipRouteType object.')
wfIpxBaseRouteProto = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("netmgmt", 3), ("rip", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteProto.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteProto.setDescription('The routing mechanism via which this route was learned. Inclusion of values for gateway routing protocols is not intended to imply that hosts should support those protocols.')
wfIpxBaseRouteAge = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteAge.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteAge.setDescription("The number of seconds since this route was last updated or otherwise determined to be correct. Note that no semantics of `too old' can be implied except through knowledge of the routing protocol by which the route was learned.")
wfIpxBaseRouteInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 2, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRouteInfo.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRouteInfo.setDescription('A user-defined string which describes this Route entry')
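# Second-generation IPX routing table with separate ticks/hops metrics (wfIpxBaseRt2EntryTable, obsolete)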
wfIpxBaseRt2EntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13), )
if mibBuilder.loadTexts: wfIpxBaseRt2EntryTable.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRt2EntryTable.setDescription("The table of Elements in IPX's routing table")
wfIpxBaseRt2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxBaseRoute2Proto"), (0, "Wellfleet-IPX-MIB", "wfIpxBaseRoute2Dest"), (0, "Wellfleet-IPX-MIB", "wfIpxBaseRoute2NextHopNetwork"), (0, "Wellfleet-IPX-MIB", "wfIpxBaseRoute2NextHopHost"))
if mibBuilder.loadTexts: wfIpxBaseRt2Entry.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRt2Entry.setDescription('An entry in the Routing Table')
wfIpxBaseRoute2Dest = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Dest.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Dest.setDescription('Multiple routes to a single destination can appear in the table, but access to such multiple entries is dependent on the table- access mechanisms defined by the network management protocol in use.')
wfIpxBaseRoute2IfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2IfIndex.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2IfIndex.setDescription('The index value which uniquely identifies the local interface through which the next hop of this route should be reached. The interface identified by a particular value of this index is the same interface as identified by the same value of ifIndex.')
wfIpxBaseRoute2Ticks = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Ticks.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Ticks.setDescription("The primary routing metric (in ticks) for this route. The semantics of this metric are determined by the routing-protocol specified in the route's ipRouteProto value. If this metric is not used, its value should be set to -1.")
wfIpxBaseRoute2Hops = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Hops.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Hops.setDescription("The primary routing metric (in hops) for this route. The semantics of this metric are determined by the routing-protocol specified in the route's ipRouteProto value. If this metric is not used, its value should be set to -1.")
wfIpxBaseRoute2NextHopNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2NextHopNetwork.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2NextHopNetwork.setDescription("In the case of a route bound to an interface which is realized via a broadcast media, the value of this field is the agent's IPX address on that interface.")
wfIpxBaseRoute2NextHopHost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2NextHopHost.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2NextHopHost.setDescription('Full network.host_id address of nexthop host for this network.')
wfIpxBaseRoute2Type = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("invalid", 2), ("direct", 3), ("indirect", 4), ("static", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Type.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Type.setDescription('The type of route. Note that the values direct(3) and indirect(4) refer to the notion of direct and indirect routing in the IPX architecture. Setting this object to the value invalid(2) has the effect of invalidating the corresponding entry in the ipRouteTable object. That is, it effectively disassociates the destination identified with said entry from the route identified with said entry. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant ipRouteType object.')
wfIpxBaseRoute2Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("local", 2), ("netmgmt", 3), ("rip", 4), ("nlsp", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Proto.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Proto.setDescription('The routing mechanism via which this route was learned. Inclusion of values for gateway routing protocols is not intended to imply that hosts should support those protocols.')
wfIpxBaseRoute2Age = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Age.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Age.setDescription("The number of seconds since this route was last updated or otherwise determined to be correct. Note that no semantics of `too old' can be implied except through knowledge of the routing protocol by which the route was learned.")
wfIpxBaseRoute2Info = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 13, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseRoute2Info.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseRoute2Info.setDescription('A user-defined string which describes this Route entry')
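# IPX SAP (Service Advertising Protocol) services table (wfIpxBaseSapEntryTable, obsolete)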
wfIpxBaseSapEntryTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3), )
if mibBuilder.loadTexts: wfIpxBaseSapEntryTable.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapEntryTable.setDescription("The table of Elements in IPX's SAP table")
wfIpxBaseSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxBaseSapIndex"))
if mibBuilder.loadTexts: wfIpxBaseSapEntry.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapEntry.setDescription('An entry in the Routing Table')
wfIpxBaseSapType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapType.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapType.setDescription('Multiple routes to a single destination can appear in the table, but access to such multiple entries is dependent on the table- access mechanisms defined by the network management protocol in use.')
wfIpxBaseSapNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 2), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapNetwork.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapNetwork.setDescription('The network address of this service.')
wfIpxBaseSapHost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapHost.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapHost.setDescription('The host address for this service.')
wfIpxBaseSapSocket = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapSocket.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapSocket.setDescription('The socket for this particular service')
wfIpxBaseSapName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapName.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapName.setDescription('The server name.')
wfIpxBaseSapAge = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapAge.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapAge.setDescription('The number of seconds since SAP entry has been updated.')
wfIpxBaseSapHops = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapHops.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapHops.setDescription('The number of Hops away service is from router.')
wfIpxBaseSapIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(14, 14)).setFixedLength(14)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapIndex.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapIndex.setDescription('Index string comprised of type.network.hostid.socket used to uniquely index this server.')
wfIpxBaseSapIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 3, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxBaseSapIntf.setStatus('obsolete')
if mibBuilder.loadTexts: wfIpxBaseSapIntf.setDescription('Network Address of Nexthop Interface.')
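# Per-circuit IPX interface configuration and statistics table (wfIpxInterfaceTable, deprecated)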
wfIpxInterfaceTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4), )
if mibBuilder.loadTexts: wfIpxInterfaceTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceTable.setDescription('The network interface record')
wfIpxInterfaceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxInterfaceNetworkNumber"), (0, "Wellfleet-IPX-MIB", "wfIpxInterfaceCircuit"))
if mibBuilder.loadTexts: wfIpxInterfaceEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceEntry.setDescription('An entry in the Routing Table')
wfIpxInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceIndex.setDescription('A unique value for each IPX interface. Its value ranges between 1 and the value of MAXCIRCUITS.')
wfIpxInterfaceDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete an IPX interface.')
wfIpxInterfaceDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable an IPX interface.')
wfIpxInterfaceState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpresent", 4))).clone('notpresent')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceState.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceState.setDescription('The current state of the IPX interface')
wfIpxInterfaceCircuit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceCircuit.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceCircuit.setDescription('The Circuit Number that this interface runs over')
wfIpxInterfaceNetworkNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceNetworkNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceNetworkNumber.setDescription("The IPX Address to which this entry's addressing information pertains")
wfIpxInterfaceCost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("cost", 1))).clone('cost')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceCost.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceCost.setDescription('The Cost associated with the IPX Address of this entry')
wfIpxInterfaceXsumOn = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceXsumOn.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceXsumOn.setDescription('Flag for checksums')
wfIpxInterfaceCfgEncaps = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ethernet", 1), ("lsap", 2), ("novell", 3), ("snap", 4), ("ppp", 5))).clone('ethernet')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceCfgEncaps.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceCfgEncaps.setDescription('The encapsulation method associated with this interface chosen by the user but not necessarily the one used. (See wfIpxInterfaceEncaps)')
wfIpxInterfaceMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceMacAddress.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceMacAddress.setDescription('The MacAddress of this interface this port will receive or transmit.')
wfIpxInterfaceSMDSGroupAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 11), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceSMDSGroupAddress.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceSMDSGroupAddress.setDescription('The SMDS group address')
wfIpxInterfaceMaxInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceMaxInfo.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceMaxInfo.setDescription('The maximum size of the INFO (non-MAC) field that this port will receive or transmit.')
wfIpxInterfaceInReceives = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInReceives.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInReceives.setDescription('The total number of input datagrams received from interfaces, including those received in error.')
wfIpxInterfaceInHdrErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInHdrErrors.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInHdrErrors.setDescription('The number of input datagrams discarded due to errors in their IPX headers, including bad checksums, version number mismatch, other format errors, time-to-live exceeded, errors discovered in processing their IPX options, etc.')
wfIpxInterfaceInAddrErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInAddrErrors.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInAddrErrors.setDescription("The number of input datagrams discarded because the IPX address in their IPX header's destination field was not a valid address to be received at this entity.")
wfIpxInterfaceForwDatagrams = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceForwDatagrams.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceForwDatagrams.setDescription('The number of input datagrams for which this entity was not their final IPX destination, as a result of which an attempt was made to find a route to forward them to that final destination. In entities which do not act as IPX Gateways, this counter will include only those packets which were Source-Routed via this entity, and the Source-Route option processing was successful.')
wfIpxInterfaceInUnknownProtos = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInUnknownProtos.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInUnknownProtos.setDescription('The number of locally-addressed datagrams received successfully but discarded because of an unknown or unsupported protocol.')
wfIpxInterfaceInDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInDiscards.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInDiscards.setDescription('The number of input IPX datagrams for which no problems were encountered to prevent their continued processing, but which were discarded (e.g., for lack of buffer space). Note that this counter does not include any datagrams discarded while awaiting re-assembly.')
wfIpxInterfaceInDelivers = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceInDelivers.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceInDelivers.setDescription('The total number of input datagrams successfully delivered to IPX user-protocols.')
wfIpxInterfaceOutRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceOutRequests.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceOutRequests.setDescription('The total number of IPX datagrams which local IPX user-protocols supplied to IPX in requests for transmission. Note that this counter does not include any datagrams counted in ipxForwDatagrams.')
wfIpxInterfaceOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceOutDiscards.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceOutDiscards.setDescription('The number of output IPX datagrams for which no problem was encountered to prevent their transmission to their destination, but which were discarded (e.g., for lack of buffer space). Note that this counter would include datagrams counted in ipForwDatagrams if any such packets met this (discretionary) discard criterion.')
wfIpxInterfaceOutNoRoutes = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceOutNoRoutes.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceOutNoRoutes.setDescription("The number of IPX datagrams discarded because no route could be found to transmit them to their destination. Note that this counter includes any packets counted in ipForwDatagrams which meet this `no-route' criterion. Note that this includes any datagrams which a host cannot route because all of its default gateways are down.")
wfIpxInterfaceTrEndStation = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceTrEndStation.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceTrEndStation.setDescription('Source Routing flag')
wfIpxInterfaceNetbiosAccept = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceNetbiosAccept.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceNetbiosAccept.setDescription('Accept NetBios All Networks Broadcast Packets')
wfIpxInterfaceNetbiosDeliver = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceNetbiosDeliver.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceNetbiosDeliver.setDescription('Deliver NetBios All Networks Broadcast Packets')
wfIpxInterfaceWanSapPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("default", 2))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceWanSapPeriod.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceWanSapPeriod.setDescription('Configurable timer for SAP updates over LAN-WAN')
wfIpxInterfaceFRBcast = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 27), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceFRBcast.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceFRBcast.setDescription('Configurable Frame Relay broadcast address')
wfIpxInterfaceFRMcast = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 28), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceFRMcast.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceFRMcast.setDescription('Configurable Frame Relay multicast address')
wfIpxInterfaceEncaps = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ethernet", 1), ("lsap", 2), ("novell", 3), ("snap", 4), ("ppp", 5))).clone('ethernet')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceEncaps.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceEncaps.setDescription("The Active or final encapsulation method associated with this interface as determined by the router's implementation of encapsulation algorithms (rules, restrictions) based on media type.")
wfIpxInterfaceSplit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceSplit.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceSplit.setDescription('Enable or Disable Split Horizon algorithm for this interface. Comes into play for non-full-meshed WAN networks (Frame Relay)')
wfIpxInterfaceCacheHit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceCacheHit.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceCacheHit.setDescription('This counter was added for performance investigation under the same category as the number of Nets and number of SAP counters. This counter will provide information that will determine the usefulness of caching.')
wfIpxInterfaceIpxWanDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanDisable.setDescription('Enable IPXWAN negotiations')
wfIpxInterfaceIpxWanCommonNet = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 33), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanCommonNet.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanCommonNet.setDescription('Common network number for IPXWAN link')
wfIpxInterfaceIpxWanTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(60))).clone(namedValues=NamedValues(("default", 60))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanTimeOut.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanTimeOut.setDescription('IPXWAN time-out period for negotiation')
wfIpxInterfaceIpxWanLinkRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(5))).clone(namedValues=NamedValues(("default", 5))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanLinkRetry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceIpxWanLinkRetry.setDescription('IPXWAN number of times to retry negotiation')
wfIpxInterfaceWanRipPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2))).clone(namedValues=NamedValues(("default", 2))).clone('default')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceWanRipPeriod.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceWanRipPeriod.setDescription('Configurable timer for routing updates LAN-WAN')
wfIpxInterfaceCfgHostNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 37), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxInterfaceCfgHostNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceCfgHostNumber.setDescription('Host Number and possible MAC address of interface. This host number will be used to override the default cct generated hostnumber (if Multiple Host enabled). This field is not valid/used if the IpxBaseMultipleHost is not enabled, if the media is non-promiscuous (Token Ring) or if the field length is not 6 bytes.')
wfIpxInterfaceActiveHostNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 4, 1, 38), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxInterfaceActiveHostNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxInterfaceActiveHostNumber.setDescription('Host Number and possible MAC address of interface. This field will always be filled in by the router and lists the HostNumber of each Interface.')
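# RIP interface configuration table (wfIpxRipIntfTable, deprecated)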
wfIpxRipIntfTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5), )
if mibBuilder.loadTexts: wfIpxRipIntfTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipIntfTable.setDescription('The list of RIP interfaces')
wfIpxRipIntfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxRipInterfaceIndex"))
if mibBuilder.loadTexts: wfIpxRipIntfEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipIntfEntry.setDescription('An entry in the Routing Table')
wfIpxRipInterfaceDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxRipInterfaceDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete an RIP Interface instance.')
wfIpxRipInterfaceDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxRipInterfaceDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable an RIP instance.')
wfIpxRipInterfaceState = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("init", 3), ("notpresent", 4))).clone('notpresent')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxRipInterfaceState.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceState.setDescription('The current state of RIP on this interface.')
wfIpxRipInterfaceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxRipInterfaceIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceIndex.setDescription('The IPX interface to run RIP on (instance id ).')
wfIpxRipInterfaceSupply = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxRipInterfaceSupply.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceSupply.setDescription('Transmit RIP packets.')
wfIpxRipInterfaceListen = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxRipInterfaceListen.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxRipInterfaceListen.setDescription('Receive RIP packets.')
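# Adjacent (static) host table for non-broadcast media such as Frame Relay and SMDS (wfIpxAdjacentHostTable, deprecated)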
wfIpxAdjacentHostTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6), )
if mibBuilder.loadTexts: wfIpxAdjacentHostTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAdjacentHostTable.setDescription('Table of configured Static Hosts This is implemented for hosts that are accessible over Frame Relay, SMDS etc. where a corresponding DLCI address must be configured for a nexthop host.')
wfIpxAdjacentHostEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxAhTargHostNetwork"), (0, "Wellfleet-IPX-MIB", "wfIpxAhTargHostId"))
if mibBuilder.loadTexts: wfIpxAdjacentHostEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAdjacentHostEntry.setDescription('An entry in the Adjacent Host Table')
wfIpxAhDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxAhDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX Static Host.')
wfIpxAhDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxAhDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX Static Route.')
wfIpxAhTargHostNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAhTargHostNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhTargHostNetwork.setDescription('The network address of the static host.')
wfIpxAhTargHostId = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(6, 6)).setFixedLength(6)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAhTargHostId.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhTargHostId.setDescription('The Address of this static host.')
wfIpxAhNextHopIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxAhNextHopIntf.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhNextHopIntf.setDescription('The Next Hop Interface Network Address')
wfIpxAhDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 6, 1, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxAhDlci.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxAhDlci.setDescription('The next Hop Host address')
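# Static route configuration table (wfIpxStaticRouteTable, deprecated)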
wfIpxStaticRouteTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7), )
if mibBuilder.loadTexts: wfIpxStaticRouteTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticRouteTable.setDescription('The list of static routes used for Configuring IPX')
wfIpxStaticRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxSrTargNetwork"), (0, "Wellfleet-IPX-MIB", "wfIpxSrNextHopNetwork"))
if mibBuilder.loadTexts: wfIpxStaticRouteEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticRouteEntry.setDescription('An entry in the Routing Table')
wfIpxSrDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSrDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX Static Route.')
wfIpxSrDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSrDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX Static Route.')
wfIpxSrTargNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSrTargNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrTargNetwork.setDescription('The Address of this static route')
wfIpxSrCost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSrCost.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrCost.setDescription('The Cost of this Static Route in Hops')
wfIpxSrNextHopNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSrNextHopNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrNextHopNetwork.setDescription('The Next Hop IPX Address')
wfIpxSrNextHopHost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 6), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSrNextHopHost.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrNextHopHost.setDescription('The next Hop Host address')
wfIpxSrTargNetworkRt = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSrTargNetworkRt.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrTargNetworkRt.setDescription('The Route Identifier: which numbered route this is to be')
wfIpxSrTickCost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 7, 1, 8), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSrTickCost.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSrTickCost.setDescription('The Cost of this Static Route in Ticks')
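# NetBIOS static route table (wfIpxNetBiosStaticRouteTable, deprecated)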
wfIpxNetBiosStaticRouteTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8), )
if mibBuilder.loadTexts: wfIpxNetBiosStaticRouteTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosStaticRouteTable.setDescription('The list of static routes used for Configuring IPX')
wfIpxNetBiosStaticRouteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxNetBiosSrTargNetwork"), (0, "Wellfleet-IPX-MIB", "wfIpxNetBiosSrIntf"))
if mibBuilder.loadTexts: wfIpxNetBiosStaticRouteEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosStaticRouteEntry.setDescription('An entry in the Routing Table')
wfIpxNetBiosSrDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxNetBiosSrDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX NETBIOS Static Route.')
wfIpxNetBiosSrDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxNetBiosSrDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX NETBIOS Static Route.')
wfIpxNetBiosSrTargNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxNetBiosSrTargNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrTargNetwork.setDescription('The Address of this static route')
wfIpxNetBiosSrName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxNetBiosSrName.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrName.setDescription('The Name of the Target Server')
wfIpxNetBiosSrIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxNetBiosSrIntf.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrIntf.setDescription('The Interface Identifier for this static entry')
wfIpxNetBiosSrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 8, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxNetBiosSrIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxNetBiosSrIndex.setDescription('The Entry Index which identifies this entry')
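# Static SAP (service) configuration table (wfIpxStaticSapTable, deprecated)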
wfIpxStaticSapTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12), )
if mibBuilder.loadTexts: wfIpxStaticSapTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapTable.setDescription('The table of Static Services')
wfIpxStaticSapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxStaticSapIntf"), (0, "Wellfleet-IPX-MIB", "wfIpxStaticSapCircuit"), (0, "Wellfleet-IPX-MIB", "wfIpxStaticSapType"), (0, "Wellfleet-IPX-MIB", "wfIpxStaticSapNetwork"), (0, "Wellfleet-IPX-MIB", "wfIpxStaticSapSocket"))
if mibBuilder.loadTexts: wfIpxStaticSapEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapEntry.setDescription('An entry in the Routing Table')
wfIpxStaticSapDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxStaticSapDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX Static SAP.')
wfIpxStaticSapDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxStaticSapDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX Static Route.')
wfIpxStaticSapType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapType.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapType.setDescription('Multiple routes to a single destination can appear in the table, but access to such multiple entries is dependent on the table- access mechanisms defined by the network management protocol in use.')
wfIpxStaticSapNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapNetwork.setDescription('The network address of this service.')
wfIpxStaticSapHost = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxStaticSapHost.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapHost.setDescription('The host address for this service.')
wfIpxStaticSapSocket = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapSocket.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapSocket.setDescription('The socket for this particular service')
wfIpxStaticSapName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxStaticSapName.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapName.setDescription('The server name.')
wfIpxStaticSapHops = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxStaticSapHops.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapHops.setDescription('The number of Hops away service is from router.')
wfIpxStaticSapIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 9), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapIndex.setDescription('Index string comprised of intf1.type.network.hostid.socket. Makes sorting easier and it is an ah workaround.')
wfIpxStaticSapIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapIntf.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapIntf.setDescription('Network Address of Nexthop Interface.')
wfIpxStaticSapCircuit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 12, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxStaticSapCircuit.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxStaticSapCircuit.setDescription('Circuit of Nexthop Interface.')
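# Network-level SAP filter table (wfIpxSapNetLvlFilterTable, deprecated)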
wfIpxSapNetLvlFilterTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9), )
if mibBuilder.loadTexts: wfIpxSapNetLvlFilterTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlFilterTable.setDescription('The list of Network Level SAP Filters')
wfIpxSapNetLvlFilter = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxSapNetLvlIntf"), (0, "Wellfleet-IPX-MIB", "wfIpxSapNetLvlIndex"))
if mibBuilder.loadTexts: wfIpxSapNetLvlFilter.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlFilter.setDescription('An entry in the Filter Table')
wfIpxSapNetLvlDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapNetLvlDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX Sap network level filter.')
wfIpxSapNetLvlDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapNetLvlDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX network level SAP filter.')
wfIpxSapNetLvlTargNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 3), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapNetLvlTargNetwork.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlTargNetwork.setDescription('The Target Network address to monitor')
wfIpxSapNetLvlType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapNetLvlType.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlType.setDescription('The Type of service to monitor')
wfIpxSapNetLvlAction = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2))).clone('active')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapNetLvlAction.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlAction.setDescription('The Action to take: Advertise or not Advertise')
wfIpxSapNetLvlIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSapNetLvlIntf.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlIntf.setDescription('The Interface identifier for this filter')
wfIpxSapNetLvlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 9, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSapNetLvlIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapNetLvlIndex.setDescription('The Filter Index.')
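# Server-level SAP filter table (wfIpxSapServtLvlFilterTable, deprecated)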
wfIpxSapServtLvlFilterTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10), )
if mibBuilder.loadTexts: wfIpxSapServtLvlFilterTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServtLvlFilterTable.setDescription('The list of Server Level SAP Filters')
wfIpxSapServLvlFilter = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxSapServLvlIntf"), (0, "Wellfleet-IPX-MIB", "wfIpxSapServLvlIndex"))
if mibBuilder.loadTexts: wfIpxSapServLvlFilter.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlFilter.setDescription('An entry in the Filter Table')
wfIpxSapServLvlDelete = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapServLvlDelete.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlDelete.setDescription('Create/Delete parameter. Default is created. Users perform an SNMP SET operation on this object in order to create/delete the IPX server-level SAP filter.')
wfIpxSapServLvlDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapServLvlDisable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlDisable.setDescription('Enable/Disable parameter. Default is enabled. Users perform an SNMP SET operation on this object in order to enable/disable this IPX server-level SAP filter.')
wfIpxSapServLvlTargServer = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapServLvlTargServer.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlTargServer.setDescription('The Server Name to monitor.')
wfIpxSapServLvlType = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 4), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapServLvlType.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlType.setDescription('Type of service: File server, Printer, etc.')
wfIpxSapServLvlAction = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2))).clone('active')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxSapServLvlAction.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlAction.setDescription('Associated Action (advertise or ignore)')
wfIpxSapServLvlIntf = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSapServLvlIntf.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlIntf.setDescription('The Interface identifier for this filter')
wfIpxSapServLvlIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 10, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxSapServLvlIndex.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxSapServLvlIndex.setDescription('The Filter Index.')
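# IPX traffic filter table (wfIpxTrafficFilterTable, deprecated)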
wfIpxTrafficFilterTable = MibTable((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11), )
if mibBuilder.loadTexts: wfIpxTrafficFilterTable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterTable.setDescription('IPX Traffic Filters')
wfIpxTrafficFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1), ).setIndexNames((0, "Wellfleet-IPX-MIB", "wfIpxTrafficFilterInterface"), (0, "Wellfleet-IPX-MIB", "wfIpxTrafficFilterCircuit"), (0, "Wellfleet-IPX-MIB", "wfIpxTrafficFilterRuleNumber"), (0, "Wellfleet-IPX-MIB", "wfIpxTrafficFilterFragment"))
if mibBuilder.loadTexts: wfIpxTrafficFilterEntry.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterEntry.setDescription('A traffic filter definition')
wfIpxTrafficFilterCreate = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("created", 1), ("deleted", 2))).clone('created')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxTrafficFilterCreate.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterCreate.setDescription('Defines the existence of the traffic filter rule: created - traffic filter exists; deleted - traffic filter does not exist and can be deleted.')
wfIpxTrafficFilterEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxTrafficFilterEnable.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterEnable.setDescription('Defines whether or not the traffic filter rule should be used: enabled - activate the rule. disabled - inactivate the rule.')
wfIpxTrafficFilterStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("active", 1), ("error", 2), ("inactive", 3))).clone('inactive')).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterStatus.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterStatus.setDescription('Defines the current status of the traffic filter: inactive - the rule is not in use. active - the rule is being used. error - the application detected an error in the rule.')
wfIpxTrafficFilterCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterCounter.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterCounter.setDescription('The number of received packets that have matched this rule.')
wfIpxTrafficFilterDefinition = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 5), Opaque()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxTrafficFilterDefinition.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterDefinition.setDescription('The filter rule definition.')
wfIpxTrafficFilterReserved = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterReserved.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterReserved.setDescription('Reserved field.')
wfIpxTrafficFilterInterface = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterInterface.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterInterface.setDescription('The network address of the IPX interface to which this filter is applied.')
wfIpxTrafficFilterCircuit = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterCircuit.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterCircuit.setDescription('The ID of the Circuit to which the filter is applied.')
wfIpxTrafficFilterRuleNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterRuleNumber.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterRuleNumber.setDescription('ID for the rule.')
wfIpxTrafficFilterFragment = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxTrafficFilterFragment.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterFragment.setDescription('Fragment number - for large rules.')
wfIpxTrafficFilterName = MibTableColumn((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 11, 1, 11), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wfIpxTrafficFilterName.setStatus('deprecated')
if mibBuilder.loadTexts: wfIpxTrafficFilterName.setDescription('Name of the rule number.')
wfIpxAggrStats = MibIdentifier((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14))
wfIpxAggrInDatagrams = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrInDatagrams.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrInDatagrams.setDescription('The total number of datagrams received')
wfIpxAggrOutDatagrams = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrOutDatagrams.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrOutDatagrams.setDescription('The total number of datagrams sent (includes datagrams forwarded)')
wfIpxAggrFwdDatagrams = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrFwdDatagrams.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrFwdDatagrams.setDescription('The total number of datagrams forwarded')
wfIpxAggrInDiscards = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrInDiscards.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrInDiscards.setDescription('The total number of datagrams queued for reception that were discarded for resource reasons')
wfIpxAggrInHdrErrs = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrInHdrErrs.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrInHdrErrs.setDescription('The total number of datagrams discarded because of errors in the IPX header.')
wfIpxAggrInAddrErrs = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrInAddrErrs.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrInAddrErrs.setDescription('The total number of datagrams received whose destination address was invalid for this entity.')
wfIpxAggrInUnknownProtos = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrInUnknownProtos.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrInUnknownProtos.setDescription('The total number of datagrams received locally which specified an unknown or unsupported protocol.')
wfIpxAggrOutDiscards = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrOutDiscards.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrOutDiscards.setDescription('The total number of datagrams queued for transmission that were discarded for resource reasons.')
wfIpxAggrOutNoRoutes = MibScalar((1, 3, 6, 1, 4, 1, 18, 3, 5, 5, 14, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wfIpxAggrOutNoRoutes.setStatus('mandatory')
if mibBuilder.loadTexts: wfIpxAggrOutNoRoutes.setDescription('The total number of datagrams queued for transmission that were discarded because the destination was unreachable.')
mibBuilder.exportSymbols("Wellfleet-IPX-MIB", wfIpxRipIntfEntry=wfIpxRipIntfEntry, wfIpxStaticRouteTable=wfIpxStaticRouteTable, wfIpxInterfaceInDiscards=wfIpxInterfaceInDiscards, wfIpxTrafficFilterCircuit=wfIpxTrafficFilterCircuit, wfIpxBaseRouteNextHopHost=wfIpxBaseRouteNextHopHost, wfIpxAggrInUnknownProtos=wfIpxAggrInUnknownProtos, wfIpxBaseRouteType=wfIpxBaseRouteType, wfIpxBaseState=wfIpxBaseState, wfIpxStaticSapIntf=wfIpxStaticSapIntf, wfIpxSrDisable=wfIpxSrDisable, wfIpxInterfaceState=wfIpxInterfaceState, wfIpxBaseSapIndex=wfIpxBaseSapIndex, wfIpxNetBiosSrIndex=wfIpxNetBiosSrIndex, wfIpxSapServLvlFilter=wfIpxSapServLvlFilter, wfIpxBaseSapEntry=wfIpxBaseSapEntry, wfIpxBaseHostCount=wfIpxBaseHostCount, wfIpxBaseNetTblFillNotify=wfIpxBaseNetTblFillNotify, wfIpxBaseRouteMetric=wfIpxBaseRouteMetric, wfIpxBaseRoute2NextHopHost=wfIpxBaseRoute2NextHopHost, wfIpxSapServLvlTargServer=wfIpxSapServLvlTargServer, wfIpxAggrInAddrErrs=wfIpxAggrInAddrErrs, wfIpxSrDelete=wfIpxSrDelete, wfIpxInterfaceActiveHostNumber=wfIpxInterfaceActiveHostNumber, wfIpxInterfaceFRBcast=wfIpxInterfaceFRBcast, wfIpxInterfaceSplit=wfIpxInterfaceSplit, wfIpxInterfaceCost=wfIpxInterfaceCost, wfIpxSapServLvlType=wfIpxSapServLvlType, wfIpxInterfaceIndex=wfIpxInterfaceIndex, wfIpxBaseTrigUpdateEn=wfIpxBaseTrigUpdateEn, wfIpxBaseSapHost=wfIpxBaseSapHost, wfIpxBaseRouteProto=wfIpxBaseRouteProto, wfIpxInterfaceCfgEncaps=wfIpxInterfaceCfgEncaps, wfIpxAhTargHostNetwork=wfIpxAhTargHostNetwork, wfIpxSrTickCost=wfIpxSrTickCost, wfIpxBaseMaxNetTblSize=wfIpxBaseMaxNetTblSize, wfIpxSapServLvlDisable=wfIpxSapServLvlDisable, wfIpxBaseRoute2Dest=wfIpxBaseRoute2Dest, wfIpxSapServLvlAction=wfIpxSapServLvlAction, wfIpxRipInterfaceState=wfIpxRipInterfaceState, wfIpxSapNetLvlIntf=wfIpxSapNetLvlIntf, wfIpxStaticSapTable=wfIpxStaticSapTable, wfIpxInterfaceMacAddress=wfIpxInterfaceMacAddress, wfIpxInterfaceCacheHit=wfIpxInterfaceCacheHit, wfIpxAggrOutDiscards=wfIpxAggrOutDiscards, wfIpxInterfaceInHdrErrors=wfIpxInterfaceInHdrErrors, wfIpxTrafficFilterDefinition=wfIpxTrafficFilterDefinition, wfIpxBaseMultipleHostAddrs=wfIpxBaseMultipleHostAddrs, wfIpxNetBiosSrDisable=wfIpxNetBiosSrDisable, wfIpxInterfaceNetbiosAccept=wfIpxInterfaceNetbiosAccept, wfIpxBaseRt2EntryTable=wfIpxBaseRt2EntryTable, wfIpxInterfaceInReceives=wfIpxInterfaceInReceives, wfIpxAhDisable=wfIpxAhDisable, wfIpxBaseSapNetwork=wfIpxBaseSapNetwork, wfIpxInterfaceCfgHostNumber=wfIpxInterfaceCfgHostNumber, wfIpxBaseRoute2Type=wfIpxBaseRoute2Type, wfIpxSapNetLvlAction=wfIpxSapNetLvlAction, wfIpxStaticSapDisable=wfIpxStaticSapDisable, wfIpxInterfaceInAddrErrors=wfIpxInterfaceInAddrErrors, wfIpxBaseSapIntf=wfIpxBaseSapIntf, wfIpxSapNetLvlFilter=wfIpxSapNetLvlFilter, wfIpxTrafficFilterStatus=wfIpxTrafficFilterStatus, wfIpxRipInterfaceSupply=wfIpxRipInterfaceSupply, wfIpxBaseDisable=wfIpxBaseDisable, wfIpxBaseRouteInfo=wfIpxBaseRouteInfo, wfIpxBaseSapHops=wfIpxBaseSapHops, wfIpxNetBiosSrDelete=wfIpxNetBiosSrDelete, wfIpxSapNetLvlIndex=wfIpxSapNetLvlIndex, wfIpxBaseRoute2Info=wfIpxBaseRoute2Info, wfIpxInterfaceTable=wfIpxInterfaceTable, wfIpxInterfaceIpxWanTimeOut=wfIpxInterfaceIpxWanTimeOut, wfIpxBaseRouterName=wfIpxBaseRouterName, wfIpxInterfaceOutNoRoutes=wfIpxInterfaceOutNoRoutes, wfIpxAdjacentHostTable=wfIpxAdjacentHostTable, wfIpxAhNextHopIntf=wfIpxAhNextHopIntf, wfIpxBaseRipMethod=wfIpxBaseRipMethod, wfIpxTrafficFilterRuleNumber=wfIpxTrafficFilterRuleNumber, wfIpxStaticSapHost=wfIpxStaticSapHost, wfIpxBaseRouteNextHopNetwork=wfIpxBaseRouteNextHopNetwork, 
wfIpxNetBiosSrName=wfIpxNetBiosSrName, wfIpxBaseSapType=wfIpxBaseSapType, wfIpxStaticSapIndex=wfIpxStaticSapIndex, wfIpxBase=wfIpxBase, wfIpxAggrInDiscards=wfIpxAggrInDiscards, wfIpxInterfaceForwDatagrams=wfIpxInterfaceForwDatagrams, wfIpxStaticSapCircuit=wfIpxStaticSapCircuit, wfIpxTrafficFilterReserved=wfIpxTrafficFilterReserved, wfIpxBaseLogFilter=wfIpxBaseLogFilter, wfIpxTrafficFilterName=wfIpxTrafficFilterName, wfIpxInterfaceCircuit=wfIpxInterfaceCircuit, wfIpxStaticSapType=wfIpxStaticSapType, wfIpxBaseDelete=wfIpxBaseDelete, wfIpxStaticSapDelete=wfIpxStaticSapDelete, wfIpxRipInterfaceDelete=wfIpxRipInterfaceDelete, wfIpxSrNextHopNetwork=wfIpxSrNextHopNetwork, wfIpxAggrFwdDatagrams=wfIpxAggrFwdDatagrams, wfIpxBaseRoute2Ticks=wfIpxBaseRoute2Ticks, wfIpxTrafficFilterFragment=wfIpxTrafficFilterFragment, wfIpxNetBiosSrTargNetwork=wfIpxNetBiosSrTargNetwork, wfIpxBasePrimaryNetNumber=wfIpxBasePrimaryNetNumber, wfIpxInterfaceTrEndStation=wfIpxInterfaceTrEndStation, wfIpxInterfaceOutDiscards=wfIpxInterfaceOutDiscards, wfIpxSapServLvlIndex=wfIpxSapServLvlIndex, wfIpxSapNetLvlDisable=wfIpxSapNetLvlDisable, wfIpxInterfaceFRMcast=wfIpxInterfaceFRMcast, wfIpxBaseRouteDest=wfIpxBaseRouteDest, wfIpxSapNetLvlDelete=wfIpxSapNetLvlDelete, wfIpxBaseRouteAge=wfIpxBaseRouteAge, wfIpxBaseSapEntryTable=wfIpxBaseSapEntryTable, wfIpxInterfaceEncaps=wfIpxInterfaceEncaps, wfIpxInterfaceIpxWanLinkRetry=wfIpxInterfaceIpxWanLinkRetry, wfIpxBaseRoute2IfIndex=wfIpxBaseRoute2IfIndex, wfIpxStaticSapSocket=wfIpxStaticSapSocket, wfIpxBaseNovellCertificationConformance=wfIpxBaseNovellCertificationConformance, wfIpxRipIntfTable=wfIpxRipIntfTable, wfIpxSrCost=wfIpxSrCost, wfIpxSrTargNetworkRt=wfIpxSrTargNetworkRt, wfIpxTrafficFilterEntry=wfIpxTrafficFilterEntry, wfIpxBaseRoute2Age=wfIpxBaseRoute2Age, wfIpxRipInterfaceListen=wfIpxRipInterfaceListen, wfIpxNetBiosStaticRouteTable=wfIpxNetBiosStaticRouteTable, wfIpxBaseMaximumPath=wfIpxBaseMaximumPath, wfIpxSrNextHopHost=wfIpxSrNextHopHost, wfIpxAdjacentHostEntry=wfIpxAdjacentHostEntry, wfIpxBaseSapName=wfIpxBaseSapName, wfIpxBaseNetCount=wfIpxBaseNetCount, wfIpxSapNetLvlType=wfIpxSapNetLvlType, wfIpxSapNetLvlFilterTable=wfIpxSapNetLvlFilterTable, wfIpxSrTargNetwork=wfIpxSrTargNetwork, wfIpxBaseRoute2NextHopNetwork=wfIpxBaseRoute2NextHopNetwork, wfIpxStaticSapName=wfIpxStaticSapName, wfIpxBaseSapSocket=wfIpxBaseSapSocket, wfIpxInterfaceMaxInfo=wfIpxInterfaceMaxInfo, wfIpxBaseRoute2Proto=wfIpxBaseRoute2Proto, wfIpxSapServtLvlFilterTable=wfIpxSapServtLvlFilterTable, wfIpxAggrOutNoRoutes=wfIpxAggrOutNoRoutes, wfIpxBaseSapAge=wfIpxBaseSapAge, wfIpxStaticSapNetwork=wfIpxStaticSapNetwork, wfIpxBaseRoute2Hops=wfIpxBaseRoute2Hops, wfIpxInterfaceWanSapPeriod=wfIpxInterfaceWanSapPeriod, wfIpxNetBiosStaticRouteEntry=wfIpxNetBiosStaticRouteEntry, wfIpxStaticSapEntry=wfIpxStaticSapEntry, wfIpxTrafficFilterTable=wfIpxTrafficFilterTable, wfIpxTrafficFilterCounter=wfIpxTrafficFilterCounter, wfIpxAggrStats=wfIpxAggrStats, wfIpxAggrInHdrErrs=wfIpxAggrInHdrErrs, wfIpxAggrInDatagrams=wfIpxAggrInDatagrams, wfIpxStaticSapHops=wfIpxStaticSapHops, wfIpxInterfaceXsumOn=wfIpxInterfaceXsumOn, wfIpxTrafficFilterInterface=wfIpxTrafficFilterInterface, wfIpxNetBiosSrIntf=wfIpxNetBiosSrIntf, wfIpxRipInterfaceDisable=wfIpxRipInterfaceDisable, wfIpxInterfaceSMDSGroupAddress=wfIpxInterfaceSMDSGroupAddress, wfIpxAhTargHostId=wfIpxAhTargHostId, wfIpxSapServLvlDelete=wfIpxSapServLvlDelete, wfIpxTrafficFilterEnable=wfIpxTrafficFilterEnable, wfIpxBaseNetTblSize=wfIpxBaseNetTblSize, 
wfIpxInterfaceWanRipPeriod=wfIpxInterfaceWanRipPeriod, wfIpxStaticRouteEntry=wfIpxStaticRouteEntry, wfIpxInterfaceDelete=wfIpxInterfaceDelete, wfIpxSapNetLvlTargNetwork=wfIpxSapNetLvlTargNetwork, wfIpxInterfaceDisable=wfIpxInterfaceDisable, wfIpxInterfaceInUnknownProtos=wfIpxInterfaceInUnknownProtos, wfIpxBaseRtEntryTable=wfIpxBaseRtEntryTable, wfIpxInterfaceNetworkNumber=wfIpxInterfaceNetworkNumber, wfIpxAggrOutDatagrams=wfIpxAggrOutDatagrams, wfIpxAhDelete=wfIpxAhDelete, wfIpxInterfaceInDelivers=wfIpxInterfaceInDelivers, wfIpxInterfaceNetbiosDeliver=wfIpxInterfaceNetbiosDeliver, wfIpxRipInterfaceIndex=wfIpxRipInterfaceIndex, wfIpxBaseNetSizeBoundEn=wfIpxBaseNetSizeBoundEn, wfIpxBaseRt2Entry=wfIpxBaseRt2Entry, wfIpxBaseCfgHostNumber=wfIpxBaseCfgHostNumber, wfIpxInterfaceEntry=wfIpxInterfaceEntry, wfIpxInterfaceIpxWanCommonNet=wfIpxInterfaceIpxWanCommonNet, wfIpxInterfaceIpxWanDisable=wfIpxInterfaceIpxWanDisable, wfIpxInterfaceOutRequests=wfIpxInterfaceOutRequests, wfIpxBaseServiceCount=wfIpxBaseServiceCount, wfIpxAhDlci=wfIpxAhDlci, wfIpxTrafficFilterCreate=wfIpxTrafficFilterCreate, wfIpxBaseRtEntry=wfIpxBaseRtEntry, wfIpxSapServLvlIntf=wfIpxSapServLvlIntf, wfIpxBaseActiveHostNumber=wfIpxBaseActiveHostNumber, wfIpxBaseRouteIfIndex=wfIpxBaseRouteIfIndex)
| 149.137255 | 8,318 | 0.792437 |
a630997d67ddc69ed95d616379d67a45f7d25a4b | 503 | py | Python | database/information/INNODB_FT_DELETED.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | database/information/INNODB_FT_DELETED.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | database/information/INNODB_FT_DELETED.py | mshobair/invitro_cheminformatics | 17201496c73453accd440646a1ee81726119a59c | [
"MIT"
] | null | null | null | import datetime
from database.database_schemas import Schemas
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.dialects.mysql import BIGINT, SMALLINT, DOUBLE, TIMESTAMP, TINYINT
from database.base import Base
class InnodbFtDeleted(Base):
"""Maps to INNODB_FT_DELETED table in information databases."""
__tablename__ = 'INNODB_FT_DELETED'
__table_args__ = {'schema': Schemas.information_schema}
DOC_ID = Column(BIGINT, nullable=False, default=0)
| 29.588235 | 82 | 0.785288 |
9a92742bc79d7afbc8b07de6abf9240a6beb4eeb | 487 | py | Python | agent/test/test_meter_unittest.py | rampopat/charje | 3af178bd72800e339c45637356440780c3b0563a | [
"MIT"
] | 1 | 2021-12-22T02:04:40.000Z | 2021-12-22T02:04:40.000Z | agent/test/test_meter_unittest.py | rampopat/charje | 3af178bd72800e339c45637356440780c3b0563a | [
"MIT"
] | null | null | null | agent/test/test_meter_unittest.py | rampopat/charje | 3af178bd72800e339c45637356440780c3b0563a | [
"MIT"
] | null | null | null | import unittest
from unittest import TestCase
from agents.meter import Meter
class TestMeter(TestCase):
def test_get_latest_aggregate_consumption(self):
expected1 = 0.434
expected2 = 0.561
meter = Meter('MAC000002')
actual1 = meter.get_latest_consumption()
actual2 = meter.get_latest_consumption()
self.assertEqual(expected1, actual1)
self.assertEqual(expected2, actual2)
if __name__ == '__main__':
unittest.main()
| 21.173913 | 52 | 0.691992 |
f8c002c523f224a211995b10f778bcca6d87bd4d | 2,383 | py | Python | train/Prm/create_prm.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | train/Prm/create_prm.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | 12 | 2021-11-30T16:56:05.000Z | 2021-12-13T16:26:31.000Z | train/Prm/create_prm.py | louisletoumelin/wind_downscaling_cnn | 9d08711620db1ee1f472847f0e822c5f4eb1d300 | [
"W3C"
] | null | null | null | import itertools
'''
prm
'''
def create_prm_dict():
"""
Create several dictionaries containing information about the training parameters.
Input: Parameters (defined inside the function)
Outputs: [prm1, prm2, ...] with element being a dictionary with info about training
"""
prms = {
# Necessary
'date': ['21_12_2021'],
# 'VCD' or 'UNet'
'model': ['UNet'],
# Specify only one name even if multiple prm are contained in prms
'name_simu': ['classic_all_low_epochs'],
# 'fold', 'class', 'degree', 'xi', 'all'
'type_of_training': ['all'],
# General
'loss': ["mse"],
'learning_rate': [0.001],
'decay': [0.0001],
# 'RMSprop' 'Adam' 'AMSgrad' 'Adamax' 'Nadam'
'optimizer': ['RMSprop'],
'list_metrics': [['mae', 'root_mse']],
'epochs': [48], # 150, 48 for classic epochs after early stopping, 40 for no_dropout
'batch_size': [32],
'additional_flat_topo': [False],
# Reduce on plateau
'ROP_factor': [0.1],
'ROP_patience': [5],
'ROP_min_lr': [1e-10],
# Convolution
'kernel_size': [(3, 3)], # (3,3)
'padding': ['same'],
'nb_filters': [32],
# Initializer
# Default = glorot_uniform_initializer, 'glorot_normal', 'lecun_uniform', 'lecun_normal'
'initializer': [None],
# Up conv
'up_conv': [(2, 2)],
# Activation
# 'relu', 'elu', 'selu'
'activation': ['relu'],
'activation_regression': ['linear'],
# Pooling, batch norm and dropout
'pool_size': [(2, 2)], # (2, 2)
'minimal_dropout_layers': [True], # True
'full_dropout': [False], # False
'dropout': [0.25],
'full_batch_norm': [False], # False
'early_stopping_patience': [15],
'early_stopping_min_delta': [0.0001],
# Other
'n_rows': [79],
'n_col': [69],
'input_shape': [(79, 69, 1)],
'output_shape': [(79, 69, 3)],
'nb_channels_output': [3],
'input_dir': ["//home/mrmn/letoumelinl/train"],
'output_dir': ["//scratch/mrmn/letoumelinl/ARPS/"],
'GPU': [True]
}
keys, values = zip(*prms.items())
list_prm = [dict(zip(keys, v)) for v in itertools.product(*values)]
return (list_prm)
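# Usage sketch (illustrative): every value in prms is a list, and
# itertools.product expands the cartesian product of all the value lists, so
# the number of returned prm dicts is the product of the list lengths.  With
# the defaults above exactly one configuration is produced:
#
#   list_prm = create_prm_dict()
#   len(list_prm)            # -> 1
#   list_prm[0]['model']     # -> 'UNet'
#
# Supplying e.g. 'learning_rate': [0.001, 0.01] would double that count.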
| 30.164557 | 96 | 0.53504 |
7147abf426c731237bd5fefff867811b2c61c9b9 | 18,873 | py | Python | validator/tests/test_scheduler/yaml_scheduler_tester.py | suparnadhar/SuparnaGit | bec2704d8b6bc1802523ec26dcb902f59a747a4d | [
"Apache-2.0"
] | 1 | 2017-08-04T10:31:00.000Z | 2017-08-04T10:31:00.000Z | validator/tests/test_scheduler/yaml_scheduler_tester.py | suparnadhar/SuparnaGit | bec2704d8b6bc1802523ec26dcb902f59a747a4d | [
"Apache-2.0"
] | null | null | null | validator/tests/test_scheduler/yaml_scheduler_tester.py | suparnadhar/SuparnaGit | bec2704d8b6bc1802523ec26dcb902f59a747a4d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import binascii
from collections import namedtuple
import copy
import hashlib
import itertools
import logging
import time
import uuid
import yaml
import sawtooth_signing as signing
from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.execution.scheduler import BatchExecutionResult
from sawtooth_validator.state.merkle import MerkleDatabase
import sawtooth_validator.protobuf.batch_pb2 as batch_pb2
import sawtooth_validator.protobuf.transaction_pb2 as transaction_pb2
LOGGER = logging.getLogger(__name__)
# Used in creating batches from the yaml file and keeping the
# ordering specified in the yaml file.
UnProcessedBatchInfo = namedtuple('UnprocessedBatchInfo',
['batch', 'key'])
def create_transaction(payload, private_key, public_key, inputs=None,
                       outputs=None, dependencies=None):
    addr = '000000' + hashlib.sha512(payload).hexdigest()[:64]
    if inputs is None:
        inputs = [addr]
    else:
        # list.append() returns None, so copy first and then append.
        inputs = inputs.copy()
        inputs.append(addr)
    if outputs is None:
        outputs = [addr]
    else:
        outputs = outputs.copy()
        outputs.append(addr)
if dependencies is None:
dependencies = []
header = transaction_pb2.TransactionHeader(
signer_pubkey=public_key,
family_name='scheduler_test',
family_version='1.0',
inputs=inputs,
outputs=outputs,
dependencies=dependencies,
nonce=str(time.time()),
payload_encoding="application/cbor",
payload_sha512=hashlib.sha512(payload).hexdigest(),
batcher_pubkey=public_key)
header_bytes = header.SerializeToString()
signature = signing.sign(header_bytes, private_key)
transaction = transaction_pb2.Transaction(
header=header_bytes,
payload=payload,
header_signature=signature)
return transaction, header
def create_batch(transactions, private_key, public_key):
transaction_ids = [t.header_signature for t in transactions]
header = batch_pb2.BatchHeader(
signer_pubkey=public_key,
transaction_ids=transaction_ids)
header_bytes = header.SerializeToString()
signature = signing.sign(header_bytes, private_key)
batch = batch_pb2.Batch(
header=header_bytes,
transactions=transactions,
header_signature=signature)
return batch
class SchedulerTester(object):
""" The canonical form of the yaml is:
- <------------------------------------------ batch start
state_hash: string. Optional. No default.
- <----------------------------------------- transaction start
inputs: list of string. Required.
- ....
outputs: list of string. Required.
- ....
addresses_to_set: list of dict. Optional.
- string <address>: Optional bytes <value>
valid: boolean. Optional. Defaults to True
dependencies: list of string. Optional. Defaults to empty list.
- ..... string. No default. If a dependency is the
same string as an 'id' for another txn, that txn's
signature will be used for the actual Transaction's
                  dependency. If the string is not an 'id' of another
                  txn and it is longer than 20 characters, it will be
                  used as if it were the actual
                  Transaction.header_signature for the dependency.
                  Otherwise it will be disregarded.
id: string. Optional. No default."""
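    # Illustrative yaml matching the layout described above (made-up
    # addresses, not taken from real test data).  Note that _process_txns()
    # below reads the per-transaction identifier from the 'name' key:
    #
    #   - - inputs: ['aaaa:sha']
    #       outputs: ['aaaa:sha']
    #       valid: True
    #       dependencies: []
    #       name: txn_1
    #     - inputs: ['bbbb:sha']
    #       outputs: ['bbbb:sha']
    #       dependencies: ['txn_1']
    #       name: txn_2
    #
    # Typical use:
    #   tester = SchedulerTester('path/to/file.yaml')
    #   results = tester.run_scheduler(scheduler, context_manager)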
def __init__(self, file_name):
"""
Args:
file_name (str): The yaml filename and path.
            scheduler (scheduler.Scheduler): Any Scheduler implementation
context_manager (context_manager.ContextManager): The context
manager holding state for this scheduler.
"""
self._yaml_file_name = file_name
self._counter = itertools.count(0)
self._referenced_txns_in_other_batches = {}
        # txn.header_signature : (is_valid, [{address: bytes}, ...])
self._txn_execution = {}
self._batch_results = {}
self._batches = []
self._create_batches()
@property
def batch_results(self):
"""The batch results calculated from the yaml file.
Returns:
(dict): Computed from the yaml file, a dictionary with
batch signature keys and BatchExecutionResult values.
"""
return self._batch_results
def run_scheduler(self, scheduler, context_manager, validation_state_hash=None):
"""Add all the batches to the scheduler in order and then run through
the txns in the scheduler, calling next_transaction() after each
transaction_execution_result is set.
Args:
scheduler (scheduler.Scheduler): Any implementation of the
Scheduler abstract base class.
context_manager (context_manager.ContextManager): The context
manager is needed to store state based on the yaml file.
validation_state_hash (str): Used in cases where the yaml
represents a single block of valid batches, and the
state hash is not in the yaml file. This state hash is added
to the last batch in the scheduler.
Returns batch_results (list of tuples): A list of tuples of
batch signature, BatchExecutionResult pairs.
"""
for i, batch in enumerate(self._batches):
if i == len(self._batches) - 1 and \
validation_state_hash is not None:
s_h = validation_state_hash
else:
s_h = self._batch_results[batch.header_signature].state_hash
scheduler.add_batch(batch=batch, state_hash=s_h)
scheduler.finalize()
txns_to_process = []
while not scheduler.complete(block=False):
stop = False
while not stop:
txn_info = scheduler.next_transaction()
if txn_info is not None:
txns_to_process.append(txn_info)
else:
stop = True
t_info = txns_to_process.pop()
inputs_outputs = self._get_inputs_outputs(t_info.txn)
c_id = context_manager.create_context(
state_hash=t_info.state_hash,
base_contexts=t_info.base_context_ids,
inputs=inputs_outputs[0],
outputs=inputs_outputs[1])
validity_of_transaction, address_values = self._txn_execution[
t_info.txn.header_signature]
context_manager.set(
context_id=c_id,
address_value_list=address_values)
if validity_of_transaction is False:
context_manager.delete_contexts(
context_id_list=[c_id])
scheduler.set_transaction_execution_result(
txn_signature=t_info.txn.header_signature,
is_valid=validity_of_transaction,
context_id=c_id)
batch_ids = [b.header_signature for b in self._batches]
batch_results = [
(b_id, scheduler.get_batch_execution_result(b_id))
for b_id in batch_ids]
return batch_results
def compute_state_hashes_wo_scheduler(self):
"""Creates a state hash from the state updates from each txn in a
valid batch.
Returns state_hashes (list of str): The merkle roots from state
changes in 1 or more blocks in the yaml file.
"""
tree = MerkleDatabase(database=DictDatabase())
state_hashes = []
updates = {}
for batch in self._batches:
b_id = batch.header_signature
result = self._batch_results[b_id]
if result.is_valid:
for txn in batch.transactions:
txn_id = txn.header_signature
_, address_values = self._txn_execution[txn_id]
batch_updates = {}
for pair in address_values:
batch_updates.update({a: pair[a] for a in pair.keys()})
# since this is entirely serial, any overwrite
# of an address is expected and desirable.
updates.update(batch_updates)
# This handles yaml files that have state roots in them
if result.state_hash is not None:
s_h = tree.update(set_items=updates, virtual=False)
tree.set_merkle_root(merkle_root=s_h)
state_hashes.append(s_h)
if len(state_hashes) == 0:
state_hashes.append(tree.update(set_items=updates))
return state_hashes
def _address(self, add, require_full=False):
if ':sha' not in add and ',' not in add:
return add
if ',' in add:
return binascii.hexlify(bytearray(
[int(i) for i in add.split(',')]))
parts = add.split(':')
        assert parts[0] != '', "{} is not correctly specified".format(add)
if len(parts) > 2 and not require_full:
# eg. 'aaabbbb:sha:56'
length = min(int(parts[2]), 70)
intermediate = parts[0]
address = hashlib.sha512(
intermediate.encode()).hexdigest()[:length]
elif len(parts) == 2:
# eg. 'aaabbbb:sha'
intermediate = parts[0]
address = hashlib.sha512(intermediate.encode()).hexdigest()[:70]
else:
raise ValueError("Address specified by {} could "
"not be formed".format(add))
return address
def _get_inputs_outputs(self, txn):
"""Similarly to the TransactionExecutor, deserialize the inputs and
outputs.
Notes:
The SchedulerTester has the inputs and outputs from the yaml file
that it used to create the transaction, but it seems less
error-prone to recreate the behavior of the TransactionExecutor.
Args:
txn (sawtooth_validator.protobuf.transaction_pb2.Transaction)
Returns (tuple): (inputs, outputs)
"""
header = transaction_pb2.TransactionHeader()
header.ParseFromString(txn.header)
return header.inputs, header.outputs
def _bytes_if_none(self, value):
if value is None:
value = uuid.uuid4().hex.encode()
return value
def _yaml_from_file(self):
with open(self._yaml_file_name, 'r') as infile:
test_yaml = yaml.safe_load(infile)
return test_yaml
def _unique_integer_key(self):
return next(self._counter)
def _contains_and_not_none(self, key, obj):
return key in obj and obj[key] is not None
def _process_prev_batches(self,
unprocessed_batches,
priv_key,
pub_key,
strip_deps=False):
batches = []
batches_waiting = []
b_results = {}
for batch_info in unprocessed_batches:
batch = batch_info.batch
key = batch_info.key
batch_state_root = None
if self._contains_and_not_none('state_hash', batch):
batch_state_root = batch['state_hash']
if 'state_hash' in batch: # here we don't care if it is None
del batch['state_hash']
txn_processing_result = self._process_txns(
batch=batch,
priv_key=priv_key,
pub_key=pub_key,
strip_deps=strip_deps)
if txn_processing_result is None:
batches_waiting.append(batch_info)
else:
txns, batch_is_valid = txn_processing_result
batch_real = create_batch(
transactions=txns,
private_key=priv_key,
public_key=pub_key)
b_results[batch_real.header_signature] = BatchExecutionResult(
is_valid=batch_is_valid,
state_hash=batch_state_root)
batches.append((batch_real, key))
return batches, b_results, batches_waiting
def _process_batches(self, yaml_batches, priv_key, pub_key):
batches = []
batches_waiting = []
b_results = {}
for batch in yaml_batches:
batch_state_root = None
batch_dict = None
if self._contains_and_not_none('state_hash', batch):
batch_state_root = batch['state_hash']
if 'state_hash' in batch: # here we don't care if it is None
batch_dict = copy.copy(batch)
del batch['state_hash']
txn_processing_result = self._process_txns(
batch=batch,
priv_key=priv_key,
pub_key=pub_key)
if txn_processing_result is None:
key = self._unique_integer_key()
batches.append(key)
waiting_batch = UnProcessedBatchInfo(
batch=batch_dict if batch_dict is not None else batch,
key=key)
batches_waiting.append(waiting_batch)
else:
txns, batch_is_valid = txn_processing_result
batch_real = create_batch(
transactions=txns,
private_key=priv_key,
public_key=pub_key)
b_results[batch_real.header_signature] = BatchExecutionResult(
is_valid=batch_is_valid,
state_hash=batch_state_root)
batches.append(batch_real)
return batches, b_results, batches_waiting
def _process_txns(self, batch, priv_key, pub_key, strip_deps=False):
txns = []
referenced_txns = {}
execution = {}
batch_is_valid = True
for transaction in batch:
is_valid = True
addresses_to_set = []
inputs = transaction['inputs']
outputs = transaction['outputs']
inputs_real = [self._address(a) for a in inputs]
outputs_real = [self._address(a) for a in outputs]
if self._contains_and_not_none('addresses_to_set', transaction):
addresses_to_set = [
{self._address(a, require_full=True): self._bytes_if_none(
d[a])
for a in d}
for d in transaction['addresses_to_set']
]
if self._contains_and_not_none('valid', transaction):
is_valid = bool(transaction['valid'])
if not is_valid:
batch_is_valid = False
if self._contains_and_not_none('dependencies', transaction) and \
not strip_deps:
if any([a not in self._referenced_txns_in_other_batches and
len(a) <= 20 for a in transaction['dependencies']]):
# This txn has a dependency with a txn signature that is
# not known about, so delay processing this batch.
return None
dependencies = [
self._referenced_txns_in_other_batches[a]
if a in self._referenced_txns_in_other_batches else a
for a in transaction['dependencies']]
dependencies = [a for a in dependencies if len(a) > 20]
else:
dependencies = []
txn, _ = create_transaction(
payload=uuid.uuid4().hex.encode(),
dependencies=dependencies,
inputs=inputs_real,
outputs=outputs_real,
private_key=priv_key,
public_key=pub_key)
if self._contains_and_not_none('name', transaction):
referenced_txns[transaction['name']] = txn.header_signature
execution[txn.header_signature] = (is_valid, addresses_to_set)
txns.append(txn)
self._txn_execution.update(execution)
self._referenced_txns_in_other_batches.update(referenced_txns)
return txns, batch_is_valid
def _create_batches(self):
test_yaml = self._yaml_from_file()
priv_key = signing.generate_privkey()
pub_key = signing.generate_pubkey(priv_key)
batches, batch_results, batches_waiting = self._process_batches(
yaml_batches=test_yaml,
priv_key=priv_key,
pub_key=pub_key)
# if there aren't any explicit dependencies that need to be created
# based on the transaction 'id' listed in the yaml, the next two
# code blocks won't be run.
while len(batches_waiting) > 0:
b, b_r, b_w = self._process_prev_batches(
unprocessed_batches=batches_waiting,
priv_key=priv_key,
pub_key=pub_key)
if len(batches_waiting) == len(b_w):
# If any process attempt doesn't produce a new batch,
# there is probably a cyclic dependency
break
if b:
for batch, key in b:
ind = batches.index(key)
batches[ind] = batch
batch_results.update(b_r)
batches_waiting = b_w
# Here process the batches with transaction dependencies that can't
# be computed for some reason, so just strip them out.
if batches_waiting:
b, b_r, b_w = self._process_prev_batches(
batches_waiting,
priv_key=priv_key,
pub_key=pub_key,
strip_deps=True)
for batch, key in b:
ind = batches.index(key)
batches[ind] = batch
batch_results.update(b_r)
self._batch_results = batch_results
self._batches = batches
| 38.437882 | 84 | 0.587612 |
06ae3643944f97dbcd09aa85d2bd01859f5595c6 | 796 | py | Python | testprojectD-rice-d058558a4d4f/rice/api/change_phone.py | YuanXMjoy/rice | 05e908eea8c9189c3b392d2d57e5653191bf1da9 | [
"MIT"
] | null | null | null | testprojectD-rice-d058558a4d4f/rice/api/change_phone.py | YuanXMjoy/rice | 05e908eea8c9189c3b392d2d57e5653191bf1da9 | [
"MIT"
] | null | null | null | testprojectD-rice-d058558a4d4f/rice/api/change_phone.py | YuanXMjoy/rice | 05e908eea8c9189c3b392d2d57e5653191bf1da9 | [
"MIT"
] | null | null | null | from flask import request, jsonify
from .. import db
from ..models import User
from . import api
@api.route('/change_phone', methods=['PUT'])
def change_phone():
username = request.form.get('username', type=str)
password = request.form.get('password', type=str)
new_phone_number = request.form.get('phone_number', type=str)
u = User.query.filter_by(username=username).first()
if u is None:
return jsonify({
"ok": False,
"errmsg": "User not found."
})
if u.verify_password(password):
u.phone_number = new_phone_number
db.session.add(u)
db.session.commit()
return jsonify({
"ok": True,
})
return jsonify({
"ok": False,
"errmsg": "Password not match."
})
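# Illustrative request (host/port are made up; the final URL also depends on
# the prefix used when the `api` blueprint is registered):
#   curl -X PUT \
#        -d "username=alice" -d "password=secret" -d "phone_number=0912345678" \
#        http://localhost:5000/change_phone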
| 25.677419 | 65 | 0.594221 |
5d07a2776f6d1b9ef7f27f5cb74d638333586115 | 562 | py | Python | ch15/LineBot/app/router.py | antallen/PythonMaterial | c582fb1610610feb72002f43a3758d5c58d6da85 | [
"MIT"
] | null | null | null | ch15/LineBot/app/router.py | antallen/PythonMaterial | c582fb1610610feb72002f43a3758d5c58d6da85 | [
"MIT"
] | null | null | null | ch15/LineBot/app/router.py | antallen/PythonMaterial | c582fb1610610feb72002f43a3758d5c58d6da85 | [
"MIT"
] | 1 | 2021-07-23T09:59:15.000Z | 2021-07-23T09:59:15.000Z | from app import app, handler, request, abort
from linebot.exceptions import InvalidSignatureError
from flask import render_template
# Set up the default (home) page
@app.route("/")
def home():
return render_template("home.html")
# Receive webhook notifications from the LINE platform
@app.route("/callback", methods=['POST'])
def callback():
signature = request.headers['X-Line-Signature']
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
print(body)
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
    return 'OK'
 | 24.434783 | 52 | 0.692171 |
44d25ee91440d627c19156e2f958651e1c204727 | 14,981 | py | Python | sdk/python/pulumi_aws/ec2/vpc_endpoint.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpc_endpoint.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/ec2/vpc_endpoint.py | JakeGinnivan/pulumi-aws | c91ef78932964ac74eda7f5da81f65b0f1798c93 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class VpcEndpoint(pulumi.CustomResource):
auto_accept: pulumi.Output[bool]
"""
Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account).
"""
cidr_blocks: pulumi.Output[list]
"""
The list of CIDR blocks for the exposed AWS service. Applicable for endpoints of type `Gateway`.
"""
dns_entries: pulumi.Output[list]
"""
The DNS entries for the VPC Endpoint. Applicable for endpoints of type `Interface`. DNS blocks are documented below.
* `dns_name` (`str`) - The DNS name.
* `hosted_zone_id` (`str`) - The ID of the private hosted zone.
"""
network_interface_ids: pulumi.Output[list]
"""
One or more network interfaces for the VPC Endpoint. Applicable for endpoints of type `Interface`.
"""
owner_id: pulumi.Output[str]
"""
The ID of the AWS account that owns the VPC endpoint.
"""
policy: pulumi.Output[str]
"""
A policy to attach to the endpoint that controls access to the service. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details.
"""
prefix_list_id: pulumi.Output[str]
"""
The prefix list ID of the exposed AWS service. Applicable for endpoints of type `Gateway`.
"""
private_dns_enabled: pulumi.Output[bool]
"""
Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type `Interface`.
Defaults to `false`.
"""
requester_managed: pulumi.Output[bool]
"""
Whether or not the VPC Endpoint is being managed by its service - `true` or `false`.
"""
route_table_ids: pulumi.Output[list]
"""
One or more route table IDs. Applicable for endpoints of type `Gateway`.
"""
security_group_ids: pulumi.Output[list]
"""
The ID of one or more security groups to associate with the network interface. Required for endpoints of type `Interface`.
"""
service_name: pulumi.Output[str]
"""
The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`).
"""
state: pulumi.Output[str]
"""
The state of the VPC endpoint.
"""
subnet_ids: pulumi.Output[list]
"""
The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type `Interface`.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource.
"""
vpc_endpoint_type: pulumi.Output[str]
"""
The VPC endpoint type, `Gateway` or `Interface`. Defaults to `Gateway`.
"""
vpc_id: pulumi.Output[str]
"""
The ID of the VPC in which the endpoint will be used.
"""
def __init__(__self__, resource_name, opts=None, auto_accept=None, policy=None, private_dns_enabled=None, route_table_ids=None, security_group_ids=None, service_name=None, subnet_ids=None, tags=None, vpc_endpoint_type=None, vpc_id=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a VPC Endpoint resource.
> **NOTE on VPC Endpoints and VPC Endpoint Associations:** This provider provides both standalone VPC Endpoint Associations for
Route Tables - (an association between a VPC endpoint and a single `route_table_id`) and
Subnets - (an association between a VPC endpoint and a single `subnet_id`) and
a VPC Endpoint resource with `route_table_ids` and `subnet_ids` attributes.
Do not use the same resource ID in both a VPC Endpoint resource and a VPC Endpoint Association resource.
Doing so will cause a conflict of associations and will overwrite the association.
## Example Usage
### Basic
```python
import pulumi
import pulumi_aws as aws
s3 = aws.ec2.VpcEndpoint("s3",
service_name="com.amazonaws.us-west-2.s3",
vpc_id=aws_vpc["main"]["id"])
```
### Basic w/ Tags
```python
import pulumi
import pulumi_aws as aws
s3 = aws.ec2.VpcEndpoint("s3",
service_name="com.amazonaws.us-west-2.s3",
tags={
"Environment": "test",
},
vpc_id=aws_vpc["main"]["id"])
```
### Interface Endpoint Type
```python
import pulumi
import pulumi_aws as aws
ec2 = aws.ec2.VpcEndpoint("ec2",
private_dns_enabled=True,
security_group_ids=[aws_security_group["sg1"]["id"]],
service_name="com.amazonaws.us-west-2.ec2",
vpc_endpoint_type="Interface",
vpc_id=aws_vpc["main"]["id"])
```
### Non-AWS Service
```python
import pulumi
import pulumi_aws as aws
ptfe_service_vpc_endpoint = aws.ec2.VpcEndpoint("ptfeServiceVpcEndpoint",
private_dns_enabled=False,
security_group_ids=[aws_security_group["ptfe_service"]["id"]],
service_name=var["ptfe_service"],
subnet_ids=[local["subnet_ids"]],
vpc_endpoint_type="Interface",
vpc_id=var["vpc_id"])
internal = aws.route53.get_zone(name="vpc.internal.",
private_zone=True,
vpc_id=var["vpc_id"])
ptfe_service_record = aws.route53.Record("ptfeServiceRecord",
name=f"ptfe.{internal.name}",
records=[ptfe_service_vpc_endpoint.dns_entries[0]["dns_name"]],
ttl="300",
type="CNAME",
zone_id=internal.zone_id)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_accept: Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account).
:param pulumi.Input[str] policy: A policy to attach to the endpoint that controls access to the service. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details.
:param pulumi.Input[bool] private_dns_enabled: Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type `Interface`.
Defaults to `false`.
:param pulumi.Input[list] route_table_ids: One or more route table IDs. Applicable for endpoints of type `Gateway`.
:param pulumi.Input[list] security_group_ids: The ID of one or more security groups to associate with the network interface. Required for endpoints of type `Interface`.
:param pulumi.Input[str] service_name: The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`).
:param pulumi.Input[list] subnet_ids: The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type `Interface`.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] vpc_endpoint_type: The VPC endpoint type, `Gateway` or `Interface`. Defaults to `Gateway`.
:param pulumi.Input[str] vpc_id: The ID of the VPC in which the endpoint will be used.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['auto_accept'] = auto_accept
__props__['policy'] = policy
__props__['private_dns_enabled'] = private_dns_enabled
__props__['route_table_ids'] = route_table_ids
__props__['security_group_ids'] = security_group_ids
if service_name is None:
raise TypeError("Missing required property 'service_name'")
__props__['service_name'] = service_name
__props__['subnet_ids'] = subnet_ids
__props__['tags'] = tags
__props__['vpc_endpoint_type'] = vpc_endpoint_type
if vpc_id is None:
raise TypeError("Missing required property 'vpc_id'")
__props__['vpc_id'] = vpc_id
__props__['cidr_blocks'] = None
__props__['dns_entries'] = None
__props__['network_interface_ids'] = None
__props__['owner_id'] = None
__props__['prefix_list_id'] = None
__props__['requester_managed'] = None
__props__['state'] = None
super(VpcEndpoint, __self__).__init__(
'aws:ec2/vpcEndpoint:VpcEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, auto_accept=None, cidr_blocks=None, dns_entries=None, network_interface_ids=None, owner_id=None, policy=None, prefix_list_id=None, private_dns_enabled=None, requester_managed=None, route_table_ids=None, security_group_ids=None, service_name=None, state=None, subnet_ids=None, tags=None, vpc_endpoint_type=None, vpc_id=None):
"""
Get an existing VpcEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_accept: Accept the VPC endpoint (the VPC endpoint and service need to be in the same AWS account).
:param pulumi.Input[list] cidr_blocks: The list of CIDR blocks for the exposed AWS service. Applicable for endpoints of type `Gateway`.
:param pulumi.Input[list] dns_entries: The DNS entries for the VPC Endpoint. Applicable for endpoints of type `Interface`. DNS blocks are documented below.
:param pulumi.Input[list] network_interface_ids: One or more network interfaces for the VPC Endpoint. Applicable for endpoints of type `Interface`.
:param pulumi.Input[str] owner_id: The ID of the AWS account that owns the VPC endpoint.
:param pulumi.Input[str] policy: A policy to attach to the endpoint that controls access to the service. Defaults to full access. All `Gateway` and some `Interface` endpoints support policies - see the [relevant AWS documentation](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints-access.html) for more details.
:param pulumi.Input[str] prefix_list_id: The prefix list ID of the exposed AWS service. Applicable for endpoints of type `Gateway`.
:param pulumi.Input[bool] private_dns_enabled: Whether or not to associate a private hosted zone with the specified VPC. Applicable for endpoints of type `Interface`.
Defaults to `false`.
:param pulumi.Input[bool] requester_managed: Whether or not the VPC Endpoint is being managed by its service - `true` or `false`.
:param pulumi.Input[list] route_table_ids: One or more route table IDs. Applicable for endpoints of type `Gateway`.
:param pulumi.Input[list] security_group_ids: The ID of one or more security groups to associate with the network interface. Required for endpoints of type `Interface`.
:param pulumi.Input[str] service_name: The service name. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`).
:param pulumi.Input[str] state: The state of the VPC endpoint.
:param pulumi.Input[list] subnet_ids: The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type `Interface`.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] vpc_endpoint_type: The VPC endpoint type, `Gateway` or `Interface`. Defaults to `Gateway`.
:param pulumi.Input[str] vpc_id: The ID of the VPC in which the endpoint will be used.
The **dns_entries** object supports the following:
* `dns_name` (`pulumi.Input[str]`) - The DNS name.
* `hosted_zone_id` (`pulumi.Input[str]`) - The ID of the private hosted zone.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["auto_accept"] = auto_accept
__props__["cidr_blocks"] = cidr_blocks
__props__["dns_entries"] = dns_entries
__props__["network_interface_ids"] = network_interface_ids
__props__["owner_id"] = owner_id
__props__["policy"] = policy
__props__["prefix_list_id"] = prefix_list_id
__props__["private_dns_enabled"] = private_dns_enabled
__props__["requester_managed"] = requester_managed
__props__["route_table_ids"] = route_table_ids
__props__["security_group_ids"] = security_group_ids
__props__["service_name"] = service_name
__props__["state"] = state
__props__["subnet_ids"] = subnet_ids
__props__["tags"] = tags
__props__["vpc_endpoint_type"] = vpc_endpoint_type
__props__["vpc_id"] = vpc_id
return VpcEndpoint(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 53.503571 | 366 | 0.678793 |
afcaee42220d9673d58f3d040843e58e091f90f4 | 404 | py | Python | code/subsets.py | shenhuaze/leetcode-python | b81bdb27d0f9da5620e83e2476c9ef585f4a0001 | [
"MIT"
] | 1 | 2019-06-17T04:37:39.000Z | 2019-06-17T04:37:39.000Z | code/subsets.py | shenhuaze/leetcode-python | b81bdb27d0f9da5620e83e2476c9ef585f4a0001 | [
"MIT"
] | null | null | null | code/subsets.py | shenhuaze/leetcode-python | b81bdb27d0f9da5620e83e2476c9ef585f4a0001 | [
"MIT"
] | null | null | null | """
@author Huaze Shen
@date 2019-09-28
"""
def subsets(nums):
    # Start from the empty subset; for each number, every existing subset is
    # duplicated once without it and once with it appended.
    results = [[]]
    for i in range(len(nums)):
        size = len(results)
        for j in range(size):
            # Copy of the subset without nums[i] ...
            results.append(results[j][:])
            # ... then add nums[i] to the original slot.
            results[j].append(nums[i])
    return results
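# Illustrative trace for nums = [1, 2, 3] (not part of the original file):
#   start:              [[]]
#   after processing 1: [[1], []]
#   after processing 2: [[1, 2], [2], [1], []]
#   after processing 3: [[1, 2, 3], [2, 3], [1, 3], [3], [1, 2], [2], [1], []]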
if __name__ == '__main__':
nums_ = [1, 2, 3]
results_ = subsets(nums_)
for result in results_:
print(result)
| 18.363636 | 41 | 0.554455 |
b93479042726a9ea469a159e9439f289b7359e8c | 3,927 | py | Python | AppServer/google/appengine/ext/bulkload/simpletext_connector.py | loftwah/appscale | 586fc1347ebc743d7a632de698f4dbfb09ae38d6 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | AppServer/google/appengine/ext/bulkload/simpletext_connector.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/google/appengine/ext/bulkload/simpletext_connector.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Bulkloader Simple Text writing.
Handle the simpletext format specified in a bulkloader.yaml file.
"""
from google.appengine.ext.bulkload import bulkloader_errors
from google.appengine.ext.bulkload import connector_interface
class SimpleTextConnector(connector_interface.ConnectorInterface):
"""Write a text file from dicts for each record. Does not support import."""
VALID_MODES = ('text', 'nonewline', 'binary')
@classmethod
def create_from_options(cls, options, name):
"""Factory using an options dictionary.
Args:
options: Dictionary of options containing:
template: A Python dict-interpolation string. Required.
prolog: written before the per-record output.
epilog: written after the per-record output.
mode: one of the following, default is 'text'
text: text file mode, newlines between records.
nonewline: text file mode, no added newlines.
binary: binary file mode, no added newlines.
name: The name of this transformer, for use in error messages.
Returns:
SimpleTextConnector object described by the specified options.
Raises:
InvalidConfiguration: If the config is invalid.
"""
template = options.get('template')
if not template:
raise bulkloader_errors.InvalidConfiguration(
'simpletext must specify template. (In transformer named %s)' % name)
prolog = options.get('prolog')
epilog = options.get('epilog')
mode = options.get('mode', 'text')
return cls(template, prolog, epilog, mode, name)
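  # Illustrative options (made up) accepted by create_from_options();
  # `template` is a Python '%'-style dict-interpolation string that each
  # exported record's dict is substituted into:
  #
  #   options = {
  #       'template': '%(key)s,%(value)s',
  #       'prolog': 'key,value',
  #       'epilog': '# end of export',
  #       'mode': 'text',
  #   }
  #   connector = SimpleTextConnector.create_from_options(options, 'my_export')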
def __init__(self, template, prolog=None, epilog=None, mode='text', name=''):
"""Constructor.
Args:
template: A Python dict-interpolation string.
prolog: written before the per-record output.
epilog: written after the per-record output.
mode: one of the following, default is 'text'
text: text file mode, newlines between records.
nonewline: text file mode, no added newlines.
binary: binary file mode, no added newlines.
"""
if mode not in self.VALID_MODES:
raise bulkloader_errors.InvalidConfiguration(
'simpletext mode must be one of "%s". (In transformer name %s.)' %
('", "'.join(self.VALID_MODES), name))
self.template = template
self.prolog = prolog
self.epilog = epilog
self.mode = mode
self.export_file_pointer = None
def initialize_export(self, filename, bulkload_state):
"""Open file and write prolog."""
self.bulkload_state = bulkload_state
mode = 'w'
if self.mode == 'binary':
mode = 'wb'
self.export_file_pointer = open(filename, mode)
if self.prolog:
self.export_file_pointer.write(self.prolog)
if self.mode == 'text':
self.export_file_pointer.write('\n')
def write_dict(self, dictionary):
"""Write one record for the specified entity."""
self.export_file_pointer.write(self.template % dictionary)
if self.mode == 'text':
self.export_file_pointer.write('\n')
def finalize_export(self):
"""Write epliog and close file after every record is written."""
if self.epilog:
self.export_file_pointer.write(self.epilog)
if self.mode == 'text':
self.export_file_pointer.write('\n')
self.export_file_pointer.close()
| 31.926829 | 79 | 0.692131 |
825bd070af0094185098a1c329f8948eed2be479 | 2,288 | py | Python | lib/surface/compute/security_policies/list_preconfigured_expression_sets.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/compute/security_policies/list_preconfigured_expression_sets.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/compute/security_policies/list_preconfigured_expression_sets.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list all available preconfigured expression sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
class ListPreconfiguredExpressionSets(base.ListCommand):
"""List all available preconfigured expression sets.
*{command}* lists all available preconfigured expression sets that can be used
with the Cloud Armor rules language.
## EXAMPLES
  To list all current preconfigured expression sets, run this:
$ {command}
"""
@staticmethod
def Args(parser):
"""Set up arguments for this command."""
base.URI_FLAG.RemoveFromParser(parser)
parser.display_info.AddFormat("""
table(id:label=EXPRESSION_SET,
aliases:format="get([])",
expressions:format="table(id:label=RULE_ID)")
""")
def Run(self, args):
"""Issues the request to list available preconfigured expression sets."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client.apitools_client
messages = client.MESSAGES_MODULE
project = properties.VALUES.core.project.Get(required=True)
request = (
messages.ComputeSecurityPoliciesListPreconfiguredExpressionSetsRequest(
project=project))
response = client.securityPolicies.ListPreconfiguredExpressionSets(request)
if response.preconfiguredExpressionSets is not None:
return response.preconfiguredExpressionSets.wafRules.expressionSets
return response.preconfiguredExpressionSets
| 35.2 | 80 | 0.754808 |
ea6f62b5bc4f45cefd9fb009c9276ba663bedfca | 10,517 | py | Python | HW4 - 95542247/q4/gen/Assignment4q4Lexer.py | SadraGoudarzdashti/IUSTCompiler | 7aa24df7de10030c313ad2e8f3830d9e2b182ce1 | [
"MIT"
] | null | null | null | HW4 - 95542247/q4/gen/Assignment4q4Lexer.py | SadraGoudarzdashti/IUSTCompiler | 7aa24df7de10030c313ad2e8f3830d9e2b182ce1 | [
"MIT"
] | null | null | null | HW4 - 95542247/q4/gen/Assignment4q4Lexer.py | SadraGoudarzdashti/IUSTCompiler | 7aa24df7de10030c313ad2e8f3830d9e2b182ce1 | [
"MIT"
] | null | null | null | # Generated from C:/Users/novin/PycharmProjects/tamrin-compiler\Assignment4q4.g4 by ANTLR 4.9.1
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2)")
buf.write("\u010c\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\3\2\3\2\3\2\3\2\3\2\3\2\3\3\3\3\3")
buf.write("\4\3\4\3\4\3\4\3\4\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\5\3\5")
buf.write("\3\6\3\6\3\6\3\6\3\6\3\7\3\7\3\7\3\7\3\7\3\b\3\b\3\t\3")
buf.write("\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r\3")
buf.write("\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3")
buf.write("\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24")
buf.write("\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27")
buf.write("\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\27\3\30")
buf.write("\3\30\3\31\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\35")
buf.write("\3\35\3\36\3\36\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3 ")
buf.write("\3 \3 \3 \3 \3!\3!\3!\3!\3!\3!\3\"\3\"\3\"\3\"\3\"\3#")
buf.write("\3#\3#\3#\3$\3$\3%\3%\7%\u00f1\n%\f%\16%\u00f4\13%\3&")
buf.write("\6&\u00f7\n&\r&\16&\u00f8\3\'\6\'\u00fc\n\'\r\'\16\'\u00fd")
buf.write("\3\'\3\'\3(\3(\3(\3(\7(\u0106\n(\f(\16(\u0109\13(\3(\3")
buf.write("(\2\2)\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\25\f\27")
buf.write("\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25)\26+\27-\30")
buf.write("/\31\61\32\63\33\65\34\67\359\36;\37= ?!A\"C#E$G%I&K\'")
buf.write("M(O)\3\2\7\4\2C\\c|\6\2\62;C\\aac|\3\2\62;\5\2\13\f\17")
buf.write("\17\"\"\4\2\f\f\17\17\2\u010f\2\3\3\2\2\2\2\5\3\2\2\2")
buf.write("\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17")
buf.write("\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3")
buf.write("\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2")
buf.write("\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3")
buf.write("\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2")
buf.write("\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3")
buf.write("\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E")
buf.write("\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2")
buf.write("O\3\2\2\2\3Q\3\2\2\2\5W\3\2\2\2\7Y\3\2\2\2\t`\3\2\2\2")
buf.write("\13g\3\2\2\2\rl\3\2\2\2\17q\3\2\2\2\21s\3\2\2\2\23z\3")
buf.write("\2\2\2\25|\3\2\2\2\27~\3\2\2\2\31\u0080\3\2\2\2\33\u0082")
buf.write("\3\2\2\2\35\u008a\3\2\2\2\37\u008c\3\2\2\2!\u008e\3\2")
buf.write("\2\2#\u0095\3\2\2\2%\u0099\3\2\2\2\'\u00a1\3\2\2\2)\u00a4")
buf.write("\3\2\2\2+\u00a9\3\2\2\2-\u00af\3\2\2\2/\u00c2\3\2\2\2")
buf.write("\61\u00c4\3\2\2\2\63\u00c7\3\2\2\2\65\u00c9\3\2\2\2\67")
buf.write("\u00cb\3\2\2\29\u00cd\3\2\2\2;\u00cf\3\2\2\2=\u00d1\3")
buf.write("\2\2\2?\u00d8\3\2\2\2A\u00dd\3\2\2\2C\u00e3\3\2\2\2E\u00e8")
buf.write("\3\2\2\2G\u00ec\3\2\2\2I\u00ee\3\2\2\2K\u00f6\3\2\2\2")
buf.write("M\u00fb\3\2\2\2O\u0101\3\2\2\2QR\7e\2\2RS\7n\2\2ST\7c")
buf.write("\2\2TU\7u\2\2UV\7u\2\2V\4\3\2\2\2WX\7}\2\2X\6\3\2\2\2")
buf.write("YZ\7r\2\2Z[\7w\2\2[\\\7d\2\2\\]\7n\2\2]^\7k\2\2^_\7e\2")
buf.write("\2_\b\3\2\2\2`a\7u\2\2ab\7v\2\2bc\7c\2\2cd\7v\2\2de\7")
buf.write("k\2\2ef\7e\2\2f\n\3\2\2\2gh\7x\2\2hi\7q\2\2ij\7k\2\2j")
buf.write("k\7f\2\2k\f\3\2\2\2lm\7o\2\2mn\7c\2\2no\7k\2\2op\7p\2")
buf.write("\2p\16\3\2\2\2qr\7*\2\2r\20\3\2\2\2st\7U\2\2tu\7v\2\2")
buf.write("uv\7t\2\2vw\7k\2\2wx\7p\2\2xy\7i\2\2y\22\3\2\2\2z{\7]")
buf.write("\2\2{\24\3\2\2\2|}\7_\2\2}\26\3\2\2\2~\177\7+\2\2\177")
buf.write("\30\3\2\2\2\u0080\u0081\7\177\2\2\u0081\32\3\2\2\2\u0082")
buf.write("\u0083\7g\2\2\u0083\u0084\7z\2\2\u0084\u0085\7v\2\2\u0085")
buf.write("\u0086\7g\2\2\u0086\u0087\7p\2\2\u0087\u0088\7f\2\2\u0088")
buf.write("\u0089\7u\2\2\u0089\34\3\2\2\2\u008a\u008b\7=\2\2\u008b")
buf.write("\36\3\2\2\2\u008c\u008d\7.\2\2\u008d \3\2\2\2\u008e\u008f")
buf.write("\7t\2\2\u008f\u0090\7g\2\2\u0090\u0091\7v\2\2\u0091\u0092")
buf.write("\7w\2\2\u0092\u0093\7t\2\2\u0093\u0094\7p\2\2\u0094\"")
buf.write("\3\2\2\2\u0095\u0096\7k\2\2\u0096\u0097\7p\2\2\u0097\u0098")
buf.write("\7v\2\2\u0098$\3\2\2\2\u0099\u009a\7d\2\2\u009a\u009b")
buf.write("\7q\2\2\u009b\u009c\7q\2\2\u009c\u009d\7n\2\2\u009d\u009e")
buf.write("\7g\2\2\u009e\u009f\7c\2\2\u009f\u00a0\7p\2\2\u00a0&\3")
buf.write("\2\2\2\u00a1\u00a2\7k\2\2\u00a2\u00a3\7h\2\2\u00a3(\3")
buf.write("\2\2\2\u00a4\u00a5\7g\2\2\u00a5\u00a6\7n\2\2\u00a6\u00a7")
buf.write("\7u\2\2\u00a7\u00a8\7g\2\2\u00a8*\3\2\2\2\u00a9\u00aa")
buf.write("\7y\2\2\u00aa\u00ab\7j\2\2\u00ab\u00ac\7k\2\2\u00ac\u00ad")
buf.write("\7n\2\2\u00ad\u00ae\7g\2\2\u00ae,\3\2\2\2\u00af\u00b0")
buf.write("\7U\2\2\u00b0\u00b1\7{\2\2\u00b1\u00b2\7u\2\2\u00b2\u00b3")
buf.write("\7v\2\2\u00b3\u00b4\7g\2\2\u00b4\u00b5\7o\2\2\u00b5\u00b6")
buf.write("\7\60\2\2\u00b6\u00b7\7q\2\2\u00b7\u00b8\7w\2\2\u00b8")
buf.write("\u00b9\7v\2\2\u00b9\u00ba\7\60\2\2\u00ba\u00bb\7r\2\2")
buf.write("\u00bb\u00bc\7t\2\2\u00bc\u00bd\7k\2\2\u00bd\u00be\7p")
buf.write("\2\2\u00be\u00bf\7v\2\2\u00bf\u00c0\7n\2\2\u00c0\u00c1")
buf.write("\7p\2\2\u00c1.\3\2\2\2\u00c2\u00c3\7?\2\2\u00c3\60\3\2")
buf.write("\2\2\u00c4\u00c5\7(\2\2\u00c5\u00c6\7(\2\2\u00c6\62\3")
buf.write("\2\2\2\u00c7\u00c8\7>\2\2\u00c8\64\3\2\2\2\u00c9\u00ca")
buf.write("\7-\2\2\u00ca\66\3\2\2\2\u00cb\u00cc\7/\2\2\u00cc8\3\2")
buf.write("\2\2\u00cd\u00ce\7,\2\2\u00ce:\3\2\2\2\u00cf\u00d0\7\60")
buf.write("\2\2\u00d0<\3\2\2\2\u00d1\u00d2\7n\2\2\u00d2\u00d3\7g")
buf.write("\2\2\u00d3\u00d4\7p\2\2\u00d4\u00d5\7i\2\2\u00d5\u00d6")
buf.write("\7v\2\2\u00d6\u00d7\7j\2\2\u00d7>\3\2\2\2\u00d8\u00d9")
buf.write("\7v\2\2\u00d9\u00da\7t\2\2\u00da\u00db\7w\2\2\u00db\u00dc")
buf.write("\7g\2\2\u00dc@\3\2\2\2\u00dd\u00de\7h\2\2\u00de\u00df")
buf.write("\7c\2\2\u00df\u00e0\7n\2\2\u00e0\u00e1\7u\2\2\u00e1\u00e2")
buf.write("\7g\2\2\u00e2B\3\2\2\2\u00e3\u00e4\7v\2\2\u00e4\u00e5")
buf.write("\7j\2\2\u00e5\u00e6\7k\2\2\u00e6\u00e7\7u\2\2\u00e7D\3")
buf.write("\2\2\2\u00e8\u00e9\7p\2\2\u00e9\u00ea\7g\2\2\u00ea\u00eb")
buf.write("\7y\2\2\u00ebF\3\2\2\2\u00ec\u00ed\7#\2\2\u00edH\3\2\2")
buf.write("\2\u00ee\u00f2\t\2\2\2\u00ef\u00f1\t\3\2\2\u00f0\u00ef")
buf.write("\3\2\2\2\u00f1\u00f4\3\2\2\2\u00f2\u00f0\3\2\2\2\u00f2")
buf.write("\u00f3\3\2\2\2\u00f3J\3\2\2\2\u00f4\u00f2\3\2\2\2\u00f5")
buf.write("\u00f7\t\4\2\2\u00f6\u00f5\3\2\2\2\u00f7\u00f8\3\2\2\2")
buf.write("\u00f8\u00f6\3\2\2\2\u00f8\u00f9\3\2\2\2\u00f9L\3\2\2")
buf.write("\2\u00fa\u00fc\t\5\2\2\u00fb\u00fa\3\2\2\2\u00fc\u00fd")
buf.write("\3\2\2\2\u00fd\u00fb\3\2\2\2\u00fd\u00fe\3\2\2\2\u00fe")
buf.write("\u00ff\3\2\2\2\u00ff\u0100\b\'\2\2\u0100N\3\2\2\2\u0101")
buf.write("\u0102\7\61\2\2\u0102\u0103\7\61\2\2\u0103\u0107\3\2\2")
buf.write("\2\u0104\u0106\n\6\2\2\u0105\u0104\3\2\2\2\u0106\u0109")
buf.write("\3\2\2\2\u0107\u0105\3\2\2\2\u0107\u0108\3\2\2\2\u0108")
buf.write("\u010a\3\2\2\2\u0109\u0107\3\2\2\2\u010a\u010b\b(\2\2")
buf.write("\u010bP\3\2\2\2\7\2\u00f2\u00f8\u00fd\u0107\3\b\2\2")
return buf.getvalue()
class Assignment4q4Lexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
T__21 = 22
T__22 = 23
T__23 = 24
T__24 = 25
T__25 = 26
T__26 = 27
T__27 = 28
T__28 = 29
T__29 = 30
T__30 = 31
T__31 = 32
T__32 = 33
T__33 = 34
T__34 = 35
IDENTIFIER = 36
INTEGER_LITERAL = 37
WS = 38
LINE_COMMENT = 39
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'class'", "'{'", "'public'", "'static'", "'void'", "'main'",
"'('", "'String'", "'['", "']'", "')'", "'}'", "'extends'",
"';'", "','", "'return'", "'int'", "'boolean'", "'if'", "'else'",
"'while'", "'System.out.println'", "'='", "'&&'", "'<'", "'+'",
"'-'", "'*'", "'.'", "'length'", "'true'", "'false'", "'this'",
"'new'", "'!'" ]
symbolicNames = [ "<INVALID>",
"IDENTIFIER", "INTEGER_LITERAL", "WS", "LINE_COMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "T__21", "T__22", "T__23", "T__24", "T__25",
"T__26", "T__27", "T__28", "T__29", "T__30", "T__31",
"T__32", "T__33", "T__34", "IDENTIFIER", "INTEGER_LITERAL",
"WS", "LINE_COMMENT" ]
grammarFileName = "Assignment4q4.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.9.1")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
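# Usage sketch added for illustration (not part of the generated file, kept as
# a comment so the module itself is unchanged). A typical driver with the
# standard antlr4 Python runtime might look like this; the input string is an
# assumption:
#
#   from antlr4 import InputStream, CommonTokenStream
#   lexer = Assignment4q4Lexer(InputStream("class Main { }"))
#   stream = CommonTokenStream(lexer)
#   stream.fill()
#   for token in stream.tokens:
#       print(token)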
| 53.116162 | 103 | 0.543216 |
a8ee90113def8c7db3a3a7bf03f74791cc5fb9f3 | 6,343 | py | Python | ascii_art/ascii_art.py | JustinHBird/ascii_art | b34684a706863d38e3e1cbb6b45ac868b8d9c528 | [
"MIT"
] | null | null | null | ascii_art/ascii_art.py | JustinHBird/ascii_art | b34684a706863d38e3e1cbb6b45ac868b8d9c528 | [
"MIT"
] | 4 | 2021-06-08T20:33:30.000Z | 2022-03-12T00:03:56.000Z | ascii_art/ascii_art.py | JustinHBird/ascii_art | b34684a706863d38e3e1cbb6b45ac868b8d9c528 | [
"MIT"
] | null | null | null | import os
import math
from subprocess import Popen, PIPE
from PIL import Image
from brightness import Brightness
class AsciiArt:
"""Class to convert .jpg or .png image to ascii art.
Attributes:
ascii_chars: A string of ascii characters to be used in generating the image.
x_calibrate: An int value to calibrate the output to the non-square character spacing of the terminal.
        y_calibrate: An int value to calibrate the output to the non-square character spacing of the terminal.
brightness_calc: A string to designate the brightness calculation type.
        inverse: A boolean value to designate whether or not to invert the ascii character string for image generation.
image: A PIL Image object containing the imported image.
Public Methods:
print_to_terminal(): Prints the ascii art image to the terminal.
print_to_file(): Prints the ascii art image to .txt file.
"""
def __init__(self, image_path):
"""Inits the AsciiArt class.
Loads a .jpg, .jpeg or .png image to a PIL Image to be processed as ascii art.
        Scaling defaults are set and inverse is set to False. These can be modified by
accessing the object attribute directly. i.e.
a = AsciiArt('path/to/image')
# To modify inverse:
a.inverse = True
Args:
image_path: A string containing the path of the image to be processed.
"""
#self.ascii_chars = ' `^",:;Il!i~+_-?][}{1)(|/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$'
self.ascii_chars = ' `":i|nhH0#'
self.x_calibrate = 1
self.y_calibrate = 2
self.brightness_calc = 'average'
self.inverse = False
self.image = Image.open(image_path)
def print_to_terminal(self):
"""Prints ascii_arr to terminal."""
for ascii_row in self.process_ascii_art('terminal'):
print(ascii_row)
def print_to_file(self, path):
"""Saves ascii_arr to .txt file."""
with open(path + '/ascii_art.txt', 'w') as f:
for ascii_row in self.process_ascii_art('file'):
f.write(ascii_row + "\n")
def process_ascii_art(self, destination):
        # Glue function that takes the PIL Image object, calculates brightness for each pixel and maps it to an ascii character.
        # The function yields its output at every completed row, which is consumed by print_to_terminal() or
        # print_to_file().
# Scale image for output
if destination == 'terminal':
# Output to terminal
terminal_scale = self.scale_for_terminal()
(new_width, new_height) = (self.image.width//(self.x_calibrate * terminal_scale), self.image.height//(self.y_calibrate * terminal_scale))
else:
# Output to file (8.5 X 11 assumed)
page_scale = self.scale_for_page()
(new_width, new_height) = (self.image.width//(self.x_calibrate * page_scale), self.image.height//(self.y_calibrate * page_scale))
# Create resized Image instance to process.
scaled_image = self.image.resize((int(new_width), int(new_height)))
# Initiate brightness calc object
bc = Brightness(self.brightness_calc)
min_brightness = min(bc.calc(pixel) for pixel in scaled_image.getdata())
max_brightness = max(bc.calc(pixel) for pixel in scaled_image.getdata())
brightness_range = max_brightness - min_brightness
# Build ascii_art pixel to char array
ascii_row = []
for i, p in enumerate(scaled_image.getdata()):
if i % scaled_image.width - 1 == 0:
yield ''.join(ascii_row)
ascii_row = []
else:
adjusted_brightness = bc.calc(p) - min_brightness
ascii_char = self.brightness_to_char(adjusted_brightness, brightness_range)
ascii_row.append(ascii_char)
def scale_for_terminal(self):
term_size = Popen('stty size', shell=True, stdout=PIPE).communicate()
term_height, term_width = map(lambda n: int(n) - 1, term_size[0].decode('utf-8').split())
return self.scale_image(term_width, term_height)
def scale_for_page(self):
# Need to determine optimal 8.5 X 11 character dimensions.
page_width = 150
page_height = 150
return self.scale_image(page_width, page_height)
def scale_image(self, dest_width, dest_height):
# Scale for terminal character size (based on x_calibrate and y_calibrate attribute)
img_width = self.image.width // self.x_calibrate
img_height = self.image.height // self.y_calibrate
if img_width <= dest_width and img_height <= dest_height:
return 1
else:
img_scale = img_width / img_height
output_width = output_height = 0
            # Scale for available terminal size. Needs to check based on width and height since both can vary
if dest_width / img_scale <= dest_height:
output_width = dest_width
output_height = dest_width / img_scale
if img_scale * dest_height <= dest_width and dest_height > output_height:
output_width = img_scale * dest_height
output_height = dest_height
return math.ceil(img_width / output_width)
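    # Worked example for scale_image, added for illustration (the numbers are
    # assumptions): a 300x100 image with x_calibrate=1 and y_calibrate=2 gives
    # img_width=300 and img_height=50, so img_scale=6. On an 80x24 terminal,
    # 80 / 6 (about 13.3) <= 24, so output_width=80 and the method returns
    # math.ceil(300 / 80) = 4, i.e. the image is downscaled by a factor of 4.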
def image_info(self):
"""Prints the PIL image object information."""
if self.image:
print(f'Image size: {self.image.size[0]} x {self.image.size[1]}')
def brightness_to_char(self, brightness, brightness_range):
if self.inverse:
ascii_chars = self.ascii_chars[::-1]
else:
ascii_chars = self.ascii_chars
return ascii_chars[round(brightness * ((len(ascii_chars)-1)/brightness_range))]
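    # Worked example for brightness_to_char, added for illustration: with the
    # default 11-character ramp and brightness_range=255, a brightness of 128
    # maps to index round(128 * 10 / 255) = 5, which is the '|' character.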
if __name__ == "__main__":
# Get relative path to data folder for image file
app_path = os.path.abspath(__file__)
app_dir = os.path.dirname(app_path)
parent_dir = os.path.dirname(app_dir)
data_dir = os.path.join(parent_dir, 'data')
jpg_image = os.path.join(data_dir, 'm.jpg')
a = AsciiArt(jpg_image)
a.print_to_terminal()
| 38.210843 | 149 | 0.63172 |
1c2e797e399706d5a8b5cbb79669143ef784f79c | 1,117 | py | Python | setup.py | ragnarok22/ptb-django-cookiecutter | 4a06df669052ec24fcca47c01c50bc20fc0a8561 | [
"BSD-3-Clause"
] | 18 | 2021-06-23T07:41:26.000Z | 2022-02-04T07:56:39.000Z | setup.py | ragnarok22/ptb-django-cookiecutter | 4a06df669052ec24fcca47c01c50bc20fc0a8561 | [
"BSD-3-Clause"
] | 5 | 2021-07-11T03:24:58.000Z | 2021-11-01T20:17:38.000Z | setup.py | ragnarok22/ptb-django-cookiecutter | 4a06df669052ec24fcca47c01c50bc20fc0a8561 | [
"BSD-3-Clause"
] | 7 | 2021-08-10T20:36:03.000Z | 2021-12-13T18:35:57.000Z | # !/usr/bin/env python
from distutils.core import setup
setup(
name='ptb-django-cookiecutter',
packages=[],
version='0.1.1',
description='A simple cookiecutter to create Python Telegram bot, wrapped with Django.',
author='Carlos Lugones',
license='MIT',
author_email='[email protected]',
url='https://github.com/lugodev/ptb-django-cookiecutter',
keywords=['cookiecutter', 'template', 'package', ],
python_requires='>=3.8',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
],
)
| 36.032258 | 92 | 0.617726 |
8049cda86f3a4eb2f66e099618af088d4cd0b00e | 4,464 | py | Python | test/python/circuit/test_matrix_gate.py | quantumkoen/qiskit-terra | 495046d07471e64eab6ddbdfdf8bdef88f0c644f | [
"Apache-2.0"
] | null | null | null | test/python/circuit/test_matrix_gate.py | quantumkoen/qiskit-terra | 495046d07471e64eab6ddbdfdf8bdef88f0c644f | [
"Apache-2.0"
] | null | null | null | test/python/circuit/test_matrix_gate.py | quantumkoen/qiskit-terra | 495046d07471e64eab6ddbdfdf8bdef88f0c644f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=unused-import
"""Test matrix gates"""
import os
import tempfile
import unittest
import numpy
import qiskit.extensions.simulator
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import execute
from qiskit import QiskitError
from qiskit.circuit import Gate
from qiskit.test import QiskitTestCase
from qiskit.transpiler import transpile, PassManager
from qiskit.transpiler.passes import BasicSwap, CXCancellation, Optimize1qGates
from qiskit.converters import circuit_to_dag
from qiskit.converters import circuits_to_qobj
from qiskit.extensions.standard.unitary_matrix import UnitaryMatrixGate
class TestMatrixGate(QiskitTestCase):
"""Matrix gate tests."""
def test_1q_unitary(self):
"""test 1 qubit unitary matrix"""
qr = QuantumRegister(1)
cr = ClassicalRegister(1)
qc = QuantumCircuit(qr, cr)
matrix = numpy.array([[1, 0], [0, 1]])
qc.x(qr[0])
qc.unitary(matrix, qr[0])
# test of qasm output
self.log.info(qc.qasm())
# test of text drawer
self.log.info(qc)
dag = circuit_to_dag(qc)
node_ids = dag.named_nodes('unitary')
self.assertTrue(len(node_ids) == 1)
dnode = dag.multi_graph.node[node_ids[0]]
self.assertIsInstance(dnode['op'], UnitaryMatrixGate)
for qubit in dnode['qargs']:
self.assertTrue(qubit[1] in [0, 1])
self.assertTrue(numpy.allclose(dnode['op'].matrix_rep,
matrix))
def test_2q_unitary(self):
"""test 2 qubit unitary matrix"""
backend = BasicAer.get_backend('qasm_simulator')
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
qc = QuantumCircuit(qr, cr)
sigmax = numpy.array([[0, 1], [1, 0]])
sigmay = numpy.array([[0, -1j], [1j, 0]])
matrix = numpy.kron(sigmax, sigmay)
qc.x(qr[0])
qc.unitary(matrix, qr[0], qr[1])
passman = PassManager()
passman.append(CXCancellation())
qc2 = transpile(qc, backend, pass_manager=passman)
# test of qasm output
self.log.info(qc2.qasm())
# test of text drawer
self.log.info(qc2)
dag = circuit_to_dag(qc)
nodes = dag.twoQ_nodes()
self.assertTrue(len(nodes) == 1)
dnode = nodes[0]
self.assertIsInstance(dnode['op'], UnitaryMatrixGate)
for qubit in dnode['qargs']:
self.assertTrue(qubit[1] in [0, 1])
self.assertTrue(numpy.allclose(dnode['op'].matrix_rep,
matrix))
def test_3q_unitary(self):
"""test 3 qubit unitary matrix on non-consecutive bits"""
qr = QuantumRegister(4)
qc = QuantumCircuit(qr)
sigmax = numpy.array([[0, 1], [1, 0]])
sigmay = numpy.array([[0, -1j], [1j, 0]])
matrix = numpy.kron(sigmay, numpy.kron(sigmax, sigmay))
qc.x(qr[0])
qc.unitary(matrix, qr[0], qr[1], qr[3])
qc.cx(qr[3], qr[2])
# test of qasm output
self.log.info(qc.qasm())
# test of text drawer
self.log.info(qc)
dag = circuit_to_dag(qc)
nodes = dag.threeQ_or_more_nodes()
self.assertTrue(len(nodes) == 1)
dnode = nodes[0][1]
self.assertIsInstance(dnode['op'], UnitaryMatrixGate)
for qubit in dnode['qargs']:
self.assertTrue(qubit[1] in [0, 1, 3])
self.assertTrue(numpy.allclose(dnode['op'].matrix_rep,
matrix))
def test_qobj_with_unitary_matrix(self):
"""test qobj output with unitary matrix"""
qr = QuantumRegister(4)
qc = QuantumCircuit(qr)
sigmax = numpy.array([[0, 1], [1, 0]])
sigmay = numpy.array([[0, -1j], [1j, 0]])
matrix = numpy.kron(sigmay, numpy.kron(sigmax, sigmay))
qc.x(qr[0])
qc.unitary(matrix, qr[0], qr[1], qr[3])
qc.cx(qr[3], qr[2])
qobj = circuits_to_qobj(qc)
instr = qobj.experiments[0].instructions[1]
self.assertEqual(instr.name, 'unitary')
self.assertTrue(numpy.allclose(
numpy.array(instr.params).astype(numpy.complex64),
matrix))
| 36 | 79 | 0.607079 |
b01e26c9aefab074df1983f12a5b1132baba1b38 | 128 | py | Python | animal_colors/__init__.py | afeinstein20/animal_colors | 0673ac558824251a7d599dccb6333f03983b6d8e | [
"MIT"
] | null | null | null | animal_colors/__init__.py | afeinstein20/animal_colors | 0673ac558824251a7d599dccb6333f03983b6d8e | [
"MIT"
] | null | null | null | animal_colors/__init__.py | afeinstein20/animal_colors | 0673ac558824251a7d599dccb6333f03983b6d8e | [
"MIT"
] | null | null | null | import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
from .animal_sensitivity import *
from .to_colormap import *
| 21.333333 | 55 | 0.789063 |
4f6a4bbcd7617d162a396e7af4a28bf496775fa7 | 85,534 | py | Python | grr/server/grr_response_server/data_store_test.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/data_store_test.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/data_store_test.py | ahmednofal/grr | 08a57f6873ee13f425d0106e4143663bc6dbdd60 | [
"Apache-2.0"
] | 2 | 2020-08-24T00:22:03.000Z | 2020-11-14T08:34:43.000Z | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""These are basic tests for the data store abstraction.
Implementations should be able to pass these tests to be conformant.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import functools
import inspect
import logging
import operator
import os
import random
import string
import tempfile
import threading
import time
import _thread
from builtins import range # pylint: disable=redefined-builtin
from future.utils import iteritems
from future.utils import iterkeys
from future.utils import itervalues
import mock
import pytest
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import csv
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import flow
from grr_response_server import queue_manager
from grr_response_server import sequential_collection
from grr_response_server import threadpool
from grr_response_server import worker_lib
from grr_response_server.aff4_objects import aff4_grr
from grr_response_server.aff4_objects import standard
from grr_response_server.flows.general import filesystem
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import benchmark_test_lib
from grr.test_lib import test_lib
class StringSequentialCollection(
sequential_collection.IndexedSequentialCollection):
RDF_TYPE = rdfvalue.RDFString
def DeletionTest(f):
"""This indicates a test that uses deletion."""
@functools.wraps(f)
def Decorator(testinstance):
if testinstance.TEST_DELETION:
return f(testinstance)
else:
return testinstance.skipTest("Tests that use deletion are disabled "
"for this data store.")
return Decorator
def DBSubjectLockTest(f):
"""This indicates a test that uses locks."""
@functools.wraps(f)
def Decorator(testinstance):
if testinstance.TEST_DBSUBJECTLOCKS:
return f(testinstance)
else:
return testinstance.skipTest("Tests that use locks are disabled "
"for this data store.")
return Decorator
class DataStoreTestMixin(object):
"""Test the data store abstraction.
Note that when testing timestamp behavior the cloud bigtable datastore only
has ms precision.
"""
test_row = "aff4:/row:foo"
lease_row = u"aff4:/leasetest"
# This flag controls if tests can also delete data. Some data stores don't
# support deletion so those tests will fail for them.
TEST_DELETION = True
# The same applies to locks.
TEST_DBSUBJECTLOCKS = True
def setUp(self):
super(DataStoreTestMixin, self).setUp()
data_store.DB.ClearTestDB()
def _TruncateToMilliseconds(self, timestamp_int):
timestamp_int -= (timestamp_int % 1000)
return timestamp_int
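  # Worked example, added for illustration: a microsecond timestamp of 1234567
  # is truncated to 1234000 by _TruncateToMilliseconds.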
def testSetResolve(self):
"""Test the Set() and Resolve() methods."""
predicate = "task:00000001"
value = rdf_flows.GrrMessage(session_id="session")
# Ensure that setting a value is immediately available.
data_store.DB.Set(self.test_row, predicate, value)
time.sleep(1)
data_store.DB.Set(self.test_row + "X", predicate, value)
stored_proto, _ = data_store.DB.Resolve(self.test_row, predicate)
stored_proto = rdf_flows.GrrMessage.FromSerializedString(stored_proto)
self.assertEqual(stored_proto.session_id, value.session_id)
def testSetResolveNegativeInteger(self):
data_store.DB.Set(self.test_row, "aff4:lastchunk", -1)
value, _ = data_store.DB.Resolve(self.test_row, "aff4:lastchunk")
self.assertEqual(value, -1)
def testMultiSet(self):
"""Test the MultiSet() methods."""
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(
self.test_row, {
"aff4:size": [1],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
})
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:unknown_attribute")
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
def testMultiSetTimestamps(self):
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(self.test_row, {
"aff4:size": [(1, 1000)],
"aff4:stored": [(unicode_string, 2000)]
})
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, unicode_string)
self.assertEqual(ts, 2000)
def testMultiSetNoneTimestampIsNow(self):
unicode_string = u"this is a uñîcödé string"
start_time = time.time() * 1e6
# Test None timestamp is translated to current time.
data_store.DB.MultiSet(self.test_row, {
"aff4:size": [(1, None)],
"aff4:stored": [(unicode_string, 2000)]
})
end_time = time.time() * 1e6
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
self.assertGreaterEqual(ts, start_time)
self.assertLessEqual(ts, end_time)
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, unicode_string)
self.assertEqual(ts, 2000)
def testMultiSetAsync(self):
"""Test the async MultiSet() methods."""
unicode_string = u"this is a uñîcödé string"
data_store.DB.MultiSet(
self.test_row, {
"aff4:size": [3],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
},
sync=False)
data_store.DB.Flush()
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 3)
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:unknown_attribute")
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
def testMultiSet2(self):
"""Test the MultiSet() methods."""
# Specify a per element timestamp
data_store.DB.MultiSet(self.test_row, {
"aff4:size": [(1, 1000)],
"aff4:stored": [("2", 2000)]
})
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, "2")
self.assertEqual(ts, 2000)
def testMultiSet3(self):
"""Test the MultiSet() delete methods."""
data_store.DB.MultiSet(self.test_row, {
"aff4:size": [1],
"aff4:stored": ["2"]
})
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": ["2"]}, to_delete=["aff4:size"])
# This should be gone now
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertIsNone(stored)
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, "2")
def testMultiSet4(self):
"""Test the MultiSet() delete methods when deleting the same predicate."""
data_store.DB.MultiSet(self.test_row, {
"aff4:size": [1],
"aff4:stored": ["2"]
})
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [4]}, to_delete=["aff4:size"])
# This should only produce a single result
count = 0
for count, (predicate, value, _) in enumerate(
data_store.DB.ResolvePrefix(
self.test_row, "aff4:size",
timestamp=data_store.DB.ALL_TIMESTAMPS)):
self.assertEqual(value, 4)
self.assertEqual(predicate, "aff4:size")
self.assertEqual(count, 0)
  def testMultiSetSetsTimestampWhenReplacing(self):
data_store.DB.MultiSet(
self.test_row, {"aff4:size": [(1, 1000)]}, replace=True)
stored, ts = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
self.assertEqual(ts, 1000)
def testMultiSetRemovesOtherValuesWhenReplacing(self):
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": [("2", 1000), ("3", 4000)]},
replace=False)
values = data_store.DB.ResolvePrefix(
self.test_row, "aff4:stored", timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertListEqual(values, [("aff4:stored", "3", 4000),
("aff4:stored", "2", 1000)])
data_store.DB.MultiSet(
self.test_row, {"aff4:stored": [("4", 3000)]}, replace=True)
values = data_store.DB.ResolvePrefix(
self.test_row, "aff4:stored", timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertListEqual(values, [("aff4:stored", "4", 3000)])
@DeletionTest
def testDeleteAttributes(self):
"""Test we can delete an attribute."""
predicate = "metadata:predicate"
data_store.DB.Set(self.test_row, predicate, "hello")
# Check it's there.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertEqual(stored, "hello")
data_store.DB.DeleteAttributes(self.test_row, [predicate], sync=True)
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertIsNone(stored)
@DeletionTest
def testMultiDeleteAttributes(self):
"""Test we can delete multiple attributes at once."""
test_rows = ["aff4:/row/%i" % i for i in range(0, 10)]
predicate_1 = "metadata:predicate1"
predicate_2 = "metadata:predicate2"
for row in test_rows:
data_store.DB.Set(row, predicate_1, "hello")
data_store.DB.Set(row, predicate_2, "hello")
self.assertEqual(
10, len(list(data_store.DB.ScanAttribute("aff4:/row/", predicate_1))))
self.assertEqual(
10, len(list(data_store.DB.ScanAttribute("aff4:/row/", predicate_2))))
data_store.DB.MultiDeleteAttributes(test_rows, [predicate_1, predicate_2])
self.assertFalse(
list(data_store.DB.ScanAttribute("aff4:/row/", predicate_1)))
self.assertFalse(
list(data_store.DB.ScanAttribute("aff4:/row/", predicate_2)))
def CheckLength(self, predicate, l):
all_attributes = data_store.DB.ResolveMulti(
self.test_row, [predicate], timestamp=(0, 5000))
self.assertEqual(len(list(all_attributes)), l)
  def CheckLast(self, predicate, expected_value, expected_ts):
    stored, ts = data_store.DB.Resolve(self.test_row, predicate)
    self.assertEqual(stored, expected_value)
    self.assertEqual(ts, expected_ts)
@DeletionTest
def testDeleteAttributesTimestamps(self):
"""Test we can delete an attribute in a time range."""
predicate = "metadata:tspredicate"
data_store.DB.Set(
self.test_row, predicate, "hello1000", timestamp=1000, replace=False)
data_store.DB.Set(
self.test_row, predicate, "hello2000", timestamp=2000, replace=False)
data_store.DB.Set(
self.test_row, predicate, "hello3000", timestamp=3000, replace=False)
data_store.DB.Set(
self.test_row, predicate, "hello4000", timestamp=4000, replace=False)
    # Check it's there.
self.CheckLast(predicate, "hello4000", 4000)
self.CheckLength(predicate, 4)
# Delete timestamps between 0 and 1500.
data_store.DB.DeleteAttributes(
self.test_row, [predicate], start=0, end=1500, sync=True)
self.CheckLast(predicate, "hello4000", 4000)
self.CheckLength(predicate, 3)
# Delete timestamps between 3000 and 4500.
data_store.DB.DeleteAttributes(
self.test_row, [predicate], start=3000, end=4500, sync=True)
self.CheckLast(predicate, "hello2000", 2000)
self.CheckLength(predicate, 1)
# Delete everything.
data_store.DB.DeleteAttributes(
self.test_row, [predicate], start=0, end=5000, sync=True)
self.CheckLast(predicate, None, 0)
self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubject(self):
predicate = "metadata:tspredicate"
data_store.DB.Set(
self.test_row, predicate, "hello1000", timestamp=1000, replace=False)
data_store.DB.DeleteSubject(self.test_row, sync=True)
self.CheckLength(predicate, 0)
# This should work with the sync argument too.
data_store.DB.Set(
self.test_row, predicate, "hello1000", timestamp=1000, replace=False)
data_store.DB.DeleteSubject(self.test_row, sync=True)
self.CheckLength(predicate, 0)
@DeletionTest
def testDeleteSubjects(self):
row_template = "aff4:/deletesubjectstest%d"
rows = [row_template % i for i in range(100)]
predicate = "metadata:tspredicate"
for i, row in enumerate(rows):
data_store.DB.Set(
row, predicate, "hello%d" % i, timestamp=1000, replace=False)
data_store.DB.DeleteSubjects(rows[20:80], sync=True)
res = dict(data_store.DB.MultiResolvePrefix(rows, predicate))
for i in range(100):
if 20 <= i < 80:
# These rows have been deleted.
self.assertNotIn(row_template % i, res)
else:
# These rows should be present.
self.assertIn(row_template % i, res)
def testMultiResolvePrefix(self):
"""tests MultiResolvePrefix."""
rows = self._MakeTimestampedRows()
subjects = dict(
data_store.DB.MultiResolvePrefix(rows, ["metadata:3", "metadata:7"]))
subject_names = sorted(iterkeys(subjects))
self.assertEqual(len(subjects), 2)
self.assertEqual(subject_names, [u"aff4:/row:3", u"aff4:/row:7"])
rows = []
for r in range(1, 6):
row_name = "aff4:/prefix_row_%d" % r
rows.append(row_name)
for i in range(1, 6):
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.Set(
row_name, "metadata:%s" % ("X" * i), str(i), timestamp=timestamp)
subjects = dict(data_store.DB.MultiResolvePrefix(rows, ["metadata:"]))
self.assertItemsEqual(list(iterkeys(subjects)), rows)
row = subjects["aff4:/prefix_row_4"]
self.assertEqual(len(row), 5)
subjects = dict(data_store.DB.MultiResolvePrefix(rows, ["metadata:XXX"]))
self.assertItemsEqual(list(iterkeys(subjects)), rows)
for row in itervalues(subjects):
# Those with 3-5 X's.
self.assertEqual(len(row), 3)
self.assertIn((u"metadata:XXX", "3", 3000), row)
self.assertNotIn((u"metadata:XX", "2", 2000), row)
# Test unicode subjects.
unicode_string = u"this is a uñîcödé string"
attributes = set()
for i in range(5, 10):
attributes.add(("metadata:%s" % i, "data%d" % i))
data_store.DB.MultiSet(unicode_string,
{"metadata:%s" % i: ["data%d" % i]})
result = dict(
data_store.DB.MultiResolvePrefix([unicode_string], ["metadata:"]))
result_set = set((k, v) for k, v, _ in result[unicode_string])
self.assertEqual(result_set, attributes)
def _MakeTimestampedRows(self):
# Make some rows.
rows = []
for i in range(1, 6):
row_name = "aff4:/row:%s" % i
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.Set(row_name, "metadata:%s" % i, i, timestamp=timestamp)
rows.append(row_name)
for i in range(6, 11):
row_name = "aff4:/row:%s" % i
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.MultiSet(
row_name, {"metadata:%s" % i: [i]}, timestamp=timestamp)
rows.append(row_name)
return rows
def _CheckResultTimestamps(self, result, expected_timestamps):
timestamps = []
for predicates in itervalues(result):
for predicate in predicates:
timestamps.append(predicate[2])
self.assertListEqual(sorted(timestamps), sorted(expected_timestamps))
def testMultiResolvePrefixTypePreservation(self):
"""Check result subjects have same format as original calls."""
rows = [
"aff4:/row:str",
u"aff4:/row:unicode",
rdfvalue.RDFURN("aff4:/row:URN"),
"aff4:/row:str",
u"aff4:/row:unicode",
rdfvalue.RDFURN("aff4:/row:URN"),
]
i = 0
for row_name in rows:
timestamp = rdfvalue.RDFDatetime(1000 + i)
data_store.DB.Set(row_name, "metadata:%s" % i, i, timestamp=timestamp)
i += 1
subjects = dict(
data_store.DB.MultiResolvePrefix(
rows, ["metadata:0", "metadata:2", "metadata:4"]))
self.assertEqual(
set([type(s) for s in subjects]), set([type(s) for s in rows]))
self.assertIn(rows[0], subjects)
self.assertIn(rows[2], subjects)
self.assertIn(rows[4], subjects)
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder1(self):
predicate1 = "metadata:predicate1"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order1"
# Set 100 values with increasing timestamps.
for i in range(100):
data_store.DB.Set(
subject, predicate1, str(i), timestamp=i * 1000, replace=False)
# Check that results will be returned in decreasing timestamp order.
# This test along with a next one tests that no matter how
# values were set, they will be sorted by timestamp in the decreasing
# order when fetched.
result = data_store.DB.ResolvePrefix(
subject, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS)
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrder2(self):
predicate1 = "metadata:predicate1"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order2"
# Set 100 values with timestamps starting in the future and going to
# the past.
for i in reversed(range(100)):
data_store.DB.Set(
subject, predicate1, str(i), timestamp=i * 1000, replace=False)
# Check that results will be returned in decreasing timestamp order.
# This test along with a previous one tests that no matter how
# values were set, they will be sorted by timestamp in the decreasing
# order when fetched.
result = data_store.DB.ResolvePrefix(
subject, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS)
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(result[result_index], (predicate1, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn1(self):
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column1"
# Set 100 values with increasing timestamps for each predicate.
for i in range(100):
data_store.DB.Set(
subject, predicate1, str(i), timestamp=i * 1000, replace=False)
data_store.DB.Set(
subject, predicate2, str(i), timestamp=i * 1000, replace=False)
# Check that results will be returned in decreasing timestamp order
# per column.
# This test along with a previous one tests that no matter how
# values were set, they will be sorted by timestamp in the decreasing
# order when fetched.
result = list(
data_store.DB.ResolvePrefix(
subject,
"metadata:predicate",
timestamp=data_store.DB.ALL_TIMESTAMPS,
limit=1000))
predicate1_results = [r for r in result if r[0] == predicate1]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate1_results[result_index],
(predicate1, str(i), i * 1000))
predicate2_results = [r for r in result if r[0] == predicate2]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate2_results[result_index],
(predicate2, str(i), i * 1000))
def testResolvePrefixResultsOrderedInDecreasingTimestampOrderPerColumn2(self):
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
subject = "aff4:/test_resolve_regex_results_order_in_dec_order_per_column2"
# Set 100 values for each predicate with timestamps starting in the
# future and going to the past.
for i in reversed(range(100)):
data_store.DB.Set(
subject, predicate1, str(i), timestamp=i * 1000, replace=False)
data_store.DB.Set(
subject, predicate2, str(i), timestamp=i * 1000, replace=False)
# Check that results will be returned in decreasing timestamp order
# per column.
# This test along with a previous one tests that no matter how
# values were set, they will be sorted by timestamp in the decreasing
# order when fetched.
result = list(
data_store.DB.ResolvePrefix(
subject,
"metadata:predicate",
timestamp=data_store.DB.ALL_TIMESTAMPS,
limit=1000))
predicate1_results = [r for r in result if r[0] == predicate1]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate1_results[result_index],
(predicate1, str(i), i * 1000))
predicate2_results = [r for r in result if r[0] == predicate2]
for result_index, i in enumerate(reversed(range(100))):
self.assertEqual(predicate2_results[result_index],
(predicate2, str(i), i * 1000))
def testScanAttribute(self):
data_store.DB.Set("aff4:/A", "aff4:foo", "A value")
for i in range(1, 10):
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " old value",
timestamp=2000)
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " value",
timestamp=2000)
data_store.DB.Set(
"aff4:/B/" + str(i),
"aff4:foo",
"B " + str(i) + " older value",
timestamp=1900,
replace=False)
# Something with a different attribute, which should not be included.
data_store.DB.Set(
"aff4:/B/1.1", "aff4:foo2", "B 1.1 other value", timestamp=2000)
data_store.DB.Set("aff4:/C", "aff4:foo", "C value")
values = [(r[1], r[2])
for r in data_store.DB.ScanAttribute("aff4:/B", "aff4:foo")]
self.assertEqual(values,
[(2000, "B " + str(i) + " value") for i in range(1, 10)])
values = [
r[2] for r in data_store.DB.ScanAttribute(
"aff4:/B", "aff4:foo", max_records=2)
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(1, 3)])
values = [
r[2] for r in data_store.DB.ScanAttribute(
"aff4:/B", "aff4:foo", after_urn="aff4:/B/2")
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 10)])
values = [
r[2] for r in data_store.DB.ScanAttribute(
"aff4:/B",
u"aff4:foo",
after_urn=rdfvalue.RDFURN("aff4:/B/2"),
max_records=2)
]
self.assertEqual(values, ["B " + str(i) + " value" for i in range(3, 5)])
values = [r[2] for r in data_store.DB.ScanAttribute("aff4:/", "aff4:foo")]
self.assertEqual(
values, ["A value"] + ["B " + str(i) + " value" for i in range(1, 10)
] + ["C value"])
values = [r[2] for r in data_store.DB.ScanAttribute("", "aff4:foo")]
self.assertEqual(
values, ["A value"] + ["B " + str(i) + " value" for i in range(1, 10)
] + ["C value"])
data_store.DB.Set("aff4:/files/hash/generic/sha1/", "aff4:hash", "h1")
data_store.DB.Set("aff4:/files/hash/generic/sha1/AAAAA", "aff4:hash", "h2")
data_store.DB.Set("aff4:/files/hash/generic/sha1/AAAAB", "aff4:hash", "h3")
data_store.DB.Set("aff4:/files/hash/generic/sha256/", "aff4:hash", "h4")
data_store.DB.Set("aff4:/files/hash/generic/sha256/AAAAA", "aff4:hash",
"h5")
data_store.DB.Set("aff4:/files/hash/generic/sha256/AAAAB", "aff4:hash",
"h6")
data_store.DB.Set("aff4:/files/hash/generic/sha90000", "aff4:hash", "h7")
(value, _) = data_store.DB.Resolve("aff4:/files/hash/generic/sha90000",
"aff4:hash")
self.assertEqual(value, "h7")
values = [
r[2]
for r in data_store.DB.ScanAttribute("aff4:/files/hash", "aff4:hash")
]
self.assertEqual(values, ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
values = [
r[2] for r in data_store.DB.ScanAttribute(
"aff4:/files/hash", "aff4:hash", relaxed_order=True)
]
self.assertEqual(sorted(values), ["h1", "h2", "h3", "h4", "h5", "h6", "h7"])
def testScanAttributes(self):
for i in range(0, 7):
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:foo",
"C foo " + str(i) + " value",
timestamp=10000)
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:foo",
"C foo " + str(i) + " old value",
timestamp=9000,
replace=False)
for i in range(3, 10):
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:bar",
"C bar " + str(i) + " value",
timestamp=15000)
data_store.DB.Set(
"aff4:/C/" + str(i),
"aff4:bar",
"C bar " + str(i) + " old value",
timestamp=9500,
replace=False)
data_store.DB.Set("aff4:/C/5a", "aff4:baz", "C baz value", timestamp=9800)
results = list(
data_store.DB.ScanAttributes("aff4:/C", ["aff4:foo", "aff4:bar"]))
self.assertEqual(len(results), 10)
self.assertEqual([s for s, _ in results],
["aff4:/C/" + str(i) for i in range(10)])
self.assertEqual(results[0][1], {"aff4:foo": (10000, "C foo 0 value")})
self.assertEqual(results[5][1], {
"aff4:bar": (15000, "C bar 5 value"),
"aff4:foo": (10000, "C foo 5 value")
})
self.assertEqual(results[9][1], {"aff4:bar": (15000, "C bar 9 value")})
results = list(
data_store.DB.ScanAttributes(
"aff4:/C", ["aff4:foo", "aff4:bar"], max_records=5))
self.assertEqual(len(results), 5)
def testRDFDatetimeTimestamps(self):
test_rows = self._MakeTimestampedRows()
# Make sure all timestamps are set correctly.
result = dict(data_store.DB.MultiResolvePrefix(test_rows, ["metadata:"]))
self._CheckResultTimestamps(result, range(1000, 11000, 1000))
# Now MultiResolve by timestamp.
timestamp = (rdfvalue.RDFDatetime(3000), rdfvalue.RDFDatetime(8000))
result = dict(
data_store.DB.MultiResolvePrefix(
test_rows, ["metadata:"], timestamp=timestamp))
# Timestamp selection is inclusive so we should have 3k-8k.
self._CheckResultTimestamps(result, range(3000, 9000, 1000))
# Now test timestamped attributes.
row_name = "aff4:/attribute_test_row"
attribute_name = "metadata:test_attribute"
attributes_to_set = {
attribute_name: [
(i, rdfvalue.RDFDatetime(i)) for i in range(1000, 11000, 1000)
]
}
data_store.DB.MultiSet(row_name, attributes_to_set, replace=False)
# Make sure all timestamps are set correctly.
result = dict(
data_store.DB.MultiResolvePrefix(
[row_name], ["metadata:"], timestamp=data_store.DB.ALL_TIMESTAMPS))
self._CheckResultTimestamps(result, range(1000, 11000, 1000))
if self.TEST_DELETION:
# Delete some of them.
data_store.DB.DeleteAttributes(
row_name, [attribute_name],
start=rdfvalue.RDFDatetime(2000),
end=rdfvalue.RDFDatetime(4000))
# Make sure that passing start==end deletes that version.
data_store.DB.DeleteAttributes(
row_name, [attribute_name],
start=rdfvalue.RDFDatetime(6000),
end=rdfvalue.RDFDatetime(6000))
result = dict(
data_store.DB.MultiResolvePrefix(
[row_name], ["metadata:"],
timestamp=data_store.DB.ALL_TIMESTAMPS))
expected_timestamps = [1000, 5000, 7000, 8000, 9000, 10000]
self._CheckResultTimestamps(result, expected_timestamps)
@DBSubjectLockTest
def testDBSubjectLocks(self):
"""Test lock locking."""
predicate = u"metadata:predicateÎñţér"
subject = u"aff4:/metadata:rowÎñţér"
# t1 is holding a lock on this row.
with data_store.DB.DBSubjectLock(subject, lease_time=100):
# This means that modification of this row will fail using a different
# lock.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100)
data_store.DB.Set(subject, predicate, "1")
self.assertEqual(data_store.DB.Resolve(subject, predicate)[0], "1")
t2 = data_store.DB.DBSubjectLock(subject, lease_time=100)
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100)
t2.Release()
t3 = data_store.DB.DBSubjectLock(subject, lease_time=100)
self.assertTrue(t3.CheckLease())
t3.Release()
@DBSubjectLockTest
def testDBSubjectLockIndependence(self):
"""Check that locks don't influence each other."""
subject = u"aff4:/metadata:rowÎñţér"
subject2 = u"aff4:/metadata:rowÎñţér2"
t1 = data_store.DB.DBSubjectLock(subject, lease_time=100)
# Check it's locked.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100)
# t2 is holding a lock on this row.
t2 = data_store.DB.DBSubjectLock(subject2, lease_time=100)
# This means that modification of this row will fail using a different
# lock.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject2,
lease_time=100)
t2.Release()
# Subject 1 should still be locked.
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
subject,
lease_time=100)
t1.Release()
@DBSubjectLockTest
def testDBSubjectLockLease(self):
# This needs to be current time or cloud bigtable server will reply with
# deadline exceeded because the RPC is too old.
now = int(time.time())
with test_lib.FakeTime(now):
with data_store.DB.DBSubjectLock(self.lease_row, lease_time=100) as lock:
self.assertEqual(lock.CheckLease(), 100)
self.assertTrue(lock.locked)
# Set our expiry time to now + 2 * 100
lock.UpdateLease(2 * 100)
self.assertEqual(lock.CheckLease(), 2 * 100)
# Deliberately call release twice, __exit__ will also call
lock.Release()
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiryWithExtension(self):
now = int(time.time())
# Cloud Bigtable RPC library doesn't like long, convert to int
lease_time = 100
with test_lib.FakeTime(now):
lock = data_store.DB.DBSubjectLock(self.lease_row, lease_time=lease_time)
self.assertEqual(lock.expires, int(now + lease_time) * 1e6)
lock.UpdateLease(2 * lease_time)
self.assertEqual(lock.expires, int(now + (2 * lease_time)) * 1e6)
# Lock should still be active
with test_lib.FakeTime(now + lease_time + 1):
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time)
# Now it is expired
with test_lib.FakeTime(now + (2 * lease_time) + 1):
data_store.DB.DBSubjectLock(self.lease_row, lease_time=lease_time)
@DBSubjectLockTest
def testDBSubjectLockLeaseExpiry(self):
now = int(time.time())
lease_time = 100
with test_lib.FakeTime(now):
lock = data_store.DB.DBSubjectLock(self.lease_row, lease_time=lease_time)
self.assertEqual(lock.CheckLease(), lease_time)
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time)
# Almost expired
with test_lib.FakeTime(now + lease_time - 1):
self.assertRaises(
data_store.DBSubjectLockError,
data_store.DB.DBSubjectLock,
self.lease_row,
lease_time=lease_time)
# Expired
after_expiry = now + lease_time + 1
with test_lib.FakeTime(after_expiry):
lock = data_store.DB.DBSubjectLock(self.lease_row, lease_time=lease_time)
self.assertEqual(lock.CheckLease(), lease_time)
self.assertEqual(lock.expires, int((after_expiry + lease_time) * 1e6))
@DBSubjectLockTest
def testLockRetryWrapperTemporaryFailure(self):
"""Two failed attempts to get the lock, then a succcess."""
lock = mock.MagicMock()
with mock.patch.object(time, "sleep", return_value=None) as mock_time:
with mock.patch.object(
data_store.DB,
"DBSubjectLock",
side_effect=[
data_store.DBSubjectLockError("1"),
data_store.DBSubjectLockError("2"), lock
]):
lock = data_store.DB.LockRetryWrapper("aff4:/something")
# We slept and retried twice
self.assertEqual(mock_time.call_count, 2)
lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperNoBlock(self):
subject = "aff4:/noblocklock"
lock = data_store.DB.DBSubjectLock(subject, lease_time=100)
with mock.patch.object(time, "sleep", return_value=None) as mock_time:
with self.assertRaises(data_store.DBSubjectLockError):
data_store.DB.LockRetryWrapper(subject, lease_time=100, blocking=False)
self.assertEqual(mock_time.call_count, 0)
lock.Release()
@DBSubjectLockTest
def testLockRetryWrapperCompleteFailure(self):
subject = "aff4:/subject"
# We need to sync this delete or it happens after we take the lock and
# messes up the test.
data_store.DB.DeleteSubject(subject, sync=True)
lock = data_store.DB.DBSubjectLock(subject, lease_time=100)
# By mocking out sleep we can ensure all retries are exhausted.
with mock.patch.object(time, "sleep", return_value=None):
with self.assertRaises(data_store.DBSubjectLockError):
data_store.DB.LockRetryWrapper(
subject,
lease_time=100,
retrywrap_timeout=1,
retrywrap_max_timeout=3)
lock.Release()
def testTimestamps(self):
"""Check that timestamps are reasonable."""
predicate = "metadata:predicate"
subject = "aff4:test_timestamps"
# Extend the range of valid timestamps returned from the table to account
# for potential clock skew.
start = int(time.time() - 60) * 1e6
data_store.DB.Set(subject, predicate, "1")
stored, ts = data_store.DB.Resolve(subject, predicate)
# Check the time is reasonable
end = int(time.time() + 60) * 1e6
self.assertTrue(ts >= start and ts <= end)
self.assertEqual(stored, "1")
def testSpecificTimestamps(self):
"""Check arbitrary timestamps can be specified."""
predicate = "metadata:predicate"
subject = "aff4:/test_specific_timestamps"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "2", timestamp=1000)
stored, ts = data_store.DB.Resolve(subject, predicate)
# Check the time is reasonable
self.assertEqual(ts, 1000)
self.assertEqual(stored, "2")
def testNewestTimestamps(self):
"""Check that NEWEST_TIMESTAMP works as expected."""
predicate1 = "metadata:predicate1"
predicate2 = "metadata:predicate2"
# Check we can specify a timestamp
data_store.DB.Set(
self.test_row, predicate1, "1.1", timestamp=10000, replace=False)
data_store.DB.Set(
self.test_row, predicate1, "1.2", timestamp=20000, replace=False)
data_store.DB.Set(
self.test_row, predicate2, "2.1", timestamp=11000, replace=False)
data_store.DB.Set(
self.test_row, predicate2, "2.2", timestamp=22000, replace=False)
result = data_store.DB.ResolvePrefix(
self.test_row, predicate1, timestamp=data_store.DB.ALL_TIMESTAMPS)
# Should return 2 results. Newest should be first.
values = [x[1] for x in result]
self.assertEqual(len(values), 2)
self.assertListEqual(values, ["1.2", "1.1"])
times = [x[2] for x in result]
self.assertListEqual(times, [20000, 10000])
result = data_store.DB.ResolvePrefix(
self.test_row, predicate1, timestamp=data_store.DB.NEWEST_TIMESTAMP)
# Should return 1 result - the most recent.
self.assertEqual(len(result), 1)
self.assertEqual(result[0][1], "1.2")
self.assertEqual(result[0][2], 20000)
result = list(
data_store.DB.ResolvePrefix(
self.test_row, "metadata:", timestamp=data_store.DB.ALL_TIMESTAMPS))
self.assertEqual(len(result), 4)
self.assertListEqual([r for r in result if r[0] == "metadata:predicate1"],
[(u"metadata:predicate1", "1.2", 20000),
(u"metadata:predicate1", "1.1", 10000)])
self.assertListEqual([r for r in result if r[0] == "metadata:predicate2"],
[(u"metadata:predicate2", "2.2", 22000),
(u"metadata:predicate2", "2.1", 11000)])
result = list(
data_store.DB.ResolvePrefix(
self.test_row,
"metadata:",
timestamp=data_store.DB.NEWEST_TIMESTAMP))
# Should only return the latest version.
self.assertItemsEqual(result, [(u"metadata:predicate1", "1.2", 20000),
(u"metadata:predicate2", "2.2", 22000)])
@DeletionTest
def testTimestampEdgeCases(self):
row = "aff4:/row"
attribute = "metadata:attribute"
for i in range(4):
# First TS is 0!
timestamp = rdfvalue.RDFDatetime(1000 * i)
data_store.DB.MultiSet(
row, {attribute: [i]}, timestamp=timestamp, replace=False)
rows = data_store.DB.ResolvePrefix(
row, "metadata:", timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertEqual(len(rows), 4)
self.assertItemsEqual([r[2] for r in rows], [0, 1000, 2000, 3000])
data_store.DB.DeleteAttributes(row, [attribute], start=0, end=0)
rows = data_store.DB.ResolvePrefix(
row, "metadata:", timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertEqual(len(rows), 3)
self.assertItemsEqual([r[2] for r in rows], [1000, 2000, 3000])
def testResolvePrefix(self):
predicate = "metadata:predicate"
subject = "aff4:/test_resolve_regex_prefix"
# Check we can specify a timestamp
data_store.DB.Set(subject, predicate, "3")
results = [x for x in data_store.DB.ResolvePrefix(subject, "metadata:")]
self.assertEqual(len(results), 1)
# Value
self.assertEqual(results[0][1], "3")
# Predicate
self.assertEqual(results[0][0], predicate)
def testResolveMulti(self):
"""Test regex Multi Resolving works."""
subject = "aff4:/resolve_multi"
predicates = []
predicate_values = []
for i in range(0, 100):
predicate = "metadata:predicate" + str(i)
predicates.append(predicate)
predicate_values.append("Cell " + predicate)
data_store.DB.Set(subject, predicate, "Cell " + predicate, timestamp=1000)
results = [x for x in data_store.DB.ResolveMulti(subject, predicates)]
self.assertEqual(len(results), 100)
self.assertItemsEqual(predicates, [x[0] for x in results])
self.assertItemsEqual(predicate_values, [x[1] for x in results])
# Now try to query for non existent predicates.
predicates = predicates[:10]
predicate_values = predicate_values[:10]
for i in range(10):
predicates.append("metadata:not_existing" + str(i))
results = [x for x in data_store.DB.ResolveMulti(subject, predicates)]
self.assertEqual(10, len(results))
self.assertItemsEqual(predicates[:10], [x[0] for x in results])
self.assertItemsEqual(predicate_values, [x[1] for x in results])
def testBlobs(self):
data = b"randomdata" * 50
identifier = data_store.BLOBS.WriteBlobWithUnknownHash(data)
self.assertTrue(data_store.BLOBS.CheckBlobExists(identifier))
self.assertEqual(data_store.BLOBS.ReadBlob(identifier), data)
empty_digest = rdf_objects.BlobID.FromBlobData(b"")
self.assertFalse(data_store.BLOBS.CheckBlobExists(empty_digest))
self.assertIsNone(data_store.BLOBS.ReadBlob(empty_digest))
def testAFF4BlobImage(self):
# 500k
data = b"randomdata" * 50 * 1024
identifier = data_store.BLOBS.WriteBlobWithUnknownHash(data)
# Now create the image containing the blob.
with aff4.FACTORY.Create("aff4:/C.1235/image", aff4_grr.VFSBlobImage) as fd:
fd.SetChunksize(512 * 1024)
fd.Set(fd.Schema.STAT())
fd.AddBlob(identifier, len(data))
# Check if we can read back the data.
with aff4.FACTORY.Open("aff4:/C.1235/image") as fd:
self.assertEqual(
fd.read(len(data)), data,
"Data read back from aff4image doesn't match.")
def testDotsInDirectory(self):
"""Check that dots work in rows/indexes."""
for directory in [
"aff4:/C.1240/dir", "aff4:/C.1240/dir/a.b", "aff4:/C.1240/dir/a.b/c",
"aff4:/C.1240/dir/b"
]:
aff4.FACTORY.Create(directory, standard.VFSDirectory).Close()
# This must not raise.
aff4.FACTORY.Open("aff4:/C.1240/dir/a.b/c", standard.VFSDirectory)
directory = aff4.FACTORY.Open("aff4:/C.1240/dir")
dirs = list(directory.OpenChildren())
self.assertEqual(2, len(dirs))
self.assertItemsEqual([d.urn.Basename() for d in dirs], ["b", "a.b"])
urns = list(directory.ListChildren())
self.assertEqual(2, len(urns))
self.assertItemsEqual([u.Basename() for u in urns], ["b", "a.b"])
OPEN_WITH_LOCK_NUM_THREADS = 5
OPEN_WITH_LOCK_TRIES_PER_THREAD = 3
OPEN_WITH_LOCK_SYNC_LOCK_SLEEP = 0.2
@pytest.mark.large
@DBSubjectLockTest
def testAFF4OpenWithLock(self):
self.opened = False
self.client_urn = "aff4:/C.0000000000000001"
client = aff4.FACTORY.Create(
self.client_urn, aff4_grr.VFSGRRClient, mode="w")
client.Set(client.Schema.HOSTNAME("client1"))
client.Set(
client.Schema.LEASED_UNTIL(
rdfvalue.RDFDatetime.FromSecondsSinceEpoch(0)))
client.Close()
self.open_failures = 0
self.close_failures = 0
self.results = []
def ParallelThread():
for _ in range(self.OPEN_WITH_LOCK_TRIES_PER_THREAD):
t = time.time()
try:
with aff4.FACTORY.OpenWithLock(
self.client_urn,
blocking=True,
blocking_sleep_interval=self.OPEN_WITH_LOCK_SYNC_LOCK_SLEEP,
blocking_lock_timeout=10):
# We fail if another thread has the object already opened here.
if self.opened:
self.open_failures += 1
self.fail("Double open!")
self.opened = True
logging.info("Thread %s holding lock for 0.2 seconds.",
_thread.get_ident())
time.sleep(0.2)
# We fail if someone has closed the object while we are holding it
# opened.
if not self.opened:
self.close_failures += 1
self.fail("Double close!")
self.results.append(_thread.get_ident())
self.opened = False
return
except aff4.LockError:
logging.info("Lock failed after %s seconds - retying.",
(time.time() - t))
threads = []
for _ in range(self.OPEN_WITH_LOCK_NUM_THREADS):
t = threading.Thread(target=ParallelThread)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
self.assertEqual(self.open_failures, 0)
self.assertEqual(self.close_failures, 0)
# Make sure all threads got it eventually.
self.assertEqual(len(self.results), self.OPEN_WITH_LOCK_NUM_THREADS)
def _ListedMultiResolvePrefix(self, *args, **kwargs):
return list(data_store.DB.MultiResolvePrefix(*args, **kwargs))
def _ListedResolveMulti(self, *args, **kwargs):
return list(data_store.DB.ResolveMulti(*args, **kwargs))
def _ListedResolvePrefix(self, *args, **kwargs):
return list(data_store.DB.ResolvePrefix(*args, **kwargs))
def _FlushedDeleteSubject(self, *args, **kwargs):
# DeleteSubject is not guaranteed to be synchronous. Make sure that
# we flush data store when testing it.
data_store.DB.DeleteSubject(*args, **kwargs)
data_store.DB.Flush()
def testLimits(self):
# Create 10 rows with 10 attributes each.
subjects = ["aff4:limittest_%d" % i for i in range(10)]
attributes = ["metadata:limittest_%d" % i for i in range(10)]
value_idx = 0
for subject in subjects:
for attribute in attributes:
value = "value_%d" % value_idx
value_idx += 1
data_store.DB.Set(subject, attribute, value)
# ResolvePrefix.
for limit in [1, 2, 5, 10, 100]:
results = data_store.DB.ResolvePrefix(
subjects[0], "metadata:", limit=limit)
self.assertEqual(len(results), min(limit, 10))
# MultiResolvePrefix.
for limit in [1, 2, 5, 9, 10, 11, 25, 100, 120]:
results = dict(
data_store.DB.MultiResolvePrefix(subjects, "metadata:", limit=limit))
all_results = []
      for subject_res in itervalues(results):
        all_results.extend(subject_res)
self.assertEqual(len(all_results), min(limit, 100))
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = dict(
data_store.DB.MultiResolvePrefix(
subjects, "metadata:limittest_7", limit=limit))
all_results = []
      for subject_res in itervalues(results):
        all_results.extend(subject_res)
self.assertEqual(len(all_results), min(limit, 10))
# ResolveMulti.
for limit in [1, 2, 5, 9, 10, 11, 25]:
results = list(
data_store.DB.ResolveMulti(subjects[2], attributes, limit=limit))
self.assertEqual(len(results), min(limit, 10))
def testApi(self):
# pyformat: disable
api = [
"CheckRequestsForCompletion",
"CollectionReadIndex",
"CollectionReadStoredTypes",
"CollectionScanItems",
"CreateNotifications",
"DBSubjectLock",
"DeleteAttributes",
"DeleteNotifications",
"DeleteRequest",
"DeleteRequests",
"DeleteSubject",
"DeleteSubjects",
"DeleteWellKnownFlowResponses",
"DestroyFlowStates",
"FetchResponsesForWellKnownFlow",
"GetMutationPool",
"GetNotifications",
"IndexAddKeywordsForName",
"IndexReadPostingLists",
"IndexRemoveKeywordsForName",
"MultiDeleteAttributes",
"MultiDestroyFlowStates",
"MultiResolvePrefix",
"MultiSet",
"ReadCompletedRequests",
"ReadRequestsAndResponses",
"ReadResponses",
"ReadResponsesForRequestId",
"Resolve",
"ResolveMulti",
"ResolvePrefix",
"ScanAttribute",
"ScanAttributes",
"Set",
"StoreRequestsAndResponses",
]
pool_api = [
"CollectionAddIndex",
"CollectionAddItem",
"CollectionAddStoredTypeIndex",
"CreateNotifications",
"DeleteAttributes",
"DeleteSubject",
"DeleteSubjects",
"Flush",
"MultiSet",
"QueueAddItem",
"QueueClaimRecords",
"QueueDeleteRecords",
"QueueRefreshClaims",
"QueueReleaseRecords",
"Set",
"Size",
]
# pyformat: enable
implementation = data_store.DB
reference = data_store.DataStore
for f in api:
implementation_spec = inspect.getargspec(getattr(implementation, f))
reference_spec = inspect.getargspec(getattr(reference, f))
self.assertEqual(
implementation_spec, reference_spec,
"Signatures for function %s not matching: \n%s !=\n%s" %
(f, implementation_spec, reference_spec))
# Check the MutationPool.
implementation = data_store.DB.GetMutationPool()
reference = data_store.MutationPool
for f in pool_api:
implementation_spec = inspect.getargspec(getattr(implementation, f))
reference_spec = inspect.getargspec(getattr(reference, f))
self.assertEqual(
implementation_spec, reference_spec,
"Signatures for function %s not matching: \n%s !=\n%s" %
(f, implementation_spec, reference_spec))
@DeletionTest
def testPoolDeleteSubjects(self):
predicate = "metadata:predicate"
data_store.DB.Set(self.test_row, predicate, "hello")
# Check it's there.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertEqual(stored, "hello")
pool = data_store.DB.GetMutationPool()
pool.DeleteAttributes(self.test_row, [predicate])
# Check it's still there.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertEqual(stored, "hello")
pool.Flush()
# Now it should be gone.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertIsNone(stored)
def testPoolMultiSet(self):
pool = data_store.DB.GetMutationPool()
unicode_string = u"this is a uñîcödé string"
pool.MultiSet(
self.test_row, {
"aff4:size": [1],
"aff4:stored": [unicode_string],
"aff4:unknown_attribute": ["hello"]
})
# Nothing is written before Flush() is called.
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertIsNone(stored)
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertIsNone(stored)
# Flush.
pool.Flush()
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:size")
self.assertEqual(stored, 1)
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:stored")
self.assertEqual(stored, unicode_string)
# Make sure that unknown attributes are stored as bytes.
stored, _ = data_store.DB.Resolve(self.test_row, "aff4:unknown_attribute")
self.assertEqual(stored, "hello")
self.assertEqual(type(stored), str)
@DeletionTest
def testPoolDeleteAttributes(self):
predicate = "metadata:predicate"
pool = data_store.DB.GetMutationPool()
data_store.DB.Set(self.test_row, predicate, "hello")
# Check it's there.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertEqual(stored, "hello")
pool.DeleteAttributes(self.test_row, [predicate])
# Check it's still there.
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertEqual(stored, "hello")
pool.Flush()
stored, _ = data_store.DB.Resolve(self.test_row, predicate)
self.assertIsNone(stored)
def testQueueManager(self):
session_id = rdfvalue.SessionID(flow_name="test")
client_id = test_lib.TEST_CLIENT_ID
request = rdf_flow_runner.RequestState(
id=1,
client_id=client_id,
next_state="TestState",
session_id=session_id)
with queue_manager.QueueManager() as manager:
manager.QueueRequest(request)
# We only have one unanswered request on the queue.
all_requests = list(manager.FetchRequestsAndResponses(session_id))
self.assertEqual(len(all_requests), 1)
self.assertEqual(all_requests[0], (request, []))
# FetchCompletedRequests should return nothing now.
self.assertEqual(list(manager.FetchCompletedRequests(session_id)), [])
# Now queue more requests and responses:
with queue_manager.QueueManager() as manager:
# Start with request 2 - leave request 1 un-responded to.
for request_id in range(2, 5):
request = rdf_flow_runner.RequestState(
id=request_id,
client_id=client_id,
next_state="TestState",
session_id=session_id)
manager.QueueRequest(request)
response_id = None
for response_id in range(1, 10):
# Normal message.
manager.QueueResponse(
rdf_flows.GrrMessage(
session_id=session_id,
request_id=request_id,
response_id=response_id))
# And a status message.
manager.QueueResponse(
rdf_flows.GrrMessage(
session_id=session_id,
request_id=request_id,
response_id=response_id + 1,
type=rdf_flows.GrrMessage.Type.STATUS))
completed_requests = list(manager.FetchCompletedRequests(session_id))
self.assertEqual(len(completed_requests), 3)
# First completed message is request_id = 2 with 10 responses.
self.assertEqual(completed_requests[0][0].id, 2)
# Last message is the status message.
self.assertEqual(completed_requests[0][-1].type,
rdf_flows.GrrMessage.Type.STATUS)
self.assertEqual(completed_requests[0][-1].response_id, 10)
    # Now fetch all of the completed responses; the limit is exercised further
    # below, where we only fetch some of the responses.
completed_response = list(manager.FetchCompletedResponses(session_id))
self.assertEqual(len(completed_response), 3)
for i, (request, responses) in enumerate(completed_response, 2):
self.assertEqual(request.id, i)
self.assertEqual(len(responses), 10)
# Now check if the limit is enforced. The limit refers to the total number
# of responses to return. We ask for maximum 15 responses, so we should get
# a single request with 10 responses (since 2 requests will exceed the
# limit).
more_data = False
i = 0
try:
partial_response = manager.FetchCompletedResponses(session_id, limit=15)
for i, (request, responses) in enumerate(partial_response, 2):
self.assertEqual(request.id, i)
self.assertEqual(len(responses), 10)
except queue_manager.MoreDataException:
more_data = True
# Returns the first request that is completed.
self.assertEqual(i, 3)
# Make sure the manager told us that more data is available.
self.assertTrue(more_data)
with queue_manager.QueueManager() as manager:
manager.QueueNotification(
rdf_flows.GrrNotification(session_id=session_id, timestamp=100))
stored_notifications = manager.GetNotificationsForAllShards(
session_id.Queue())
self.assertEqual(len(stored_notifications), 1)
@pytest.mark.benchmark
class DataStoreCSVBenchmarks(benchmark_test_lib.MicroBenchmarks):
"""Long running benchmarks where the results are dumped to a CSV file.
These tests are deliberately not named with the test prefix, since they need
to be run individually to get true performance data. Run by specifying the
testname with --test and setting --labels=benchmark.
The CSV output filename will be printed in a log message at the end of the
test.
"""
# What we consider as a big number of attributes.
BIG_NUM_ATTRIBUTES = 1000
units = "s"
# Database counters.
subjects = 0
predicates = 0
values = 0
queries_total = 0 # Total queries.
  queries_last_timestep = 0  # Number of queries up to the last timestep.
steps = 0 # How many steps so far.
query_interval = 3000 # A step is composed of this many queries.
test_name = "" # Current operation being run.
start_time = None
last_time = None
predicate_template = "task:flow%d"
def setUp(self):
super(DataStoreCSVBenchmarks, self).setUp(
["DB Size (KB)", "Queries", "Subjects", "Predicates", "Values"],
["<20", "<10", "<10", "<10", "<10"])
self.start_time = time.time()
self.last_time = self.start_time
def tearDown(self):
self.Register(force=True)
super(DataStoreCSVBenchmarks, self).tearDown()
self.WriteCSV()
def Register(self, force=False):
"""Add a new result line to the benchmark result."""
self.queries_total += 1
if self.queries_total % self.query_interval == 0 or force:
data_store.DB.Flush()
this_time = time.time()
queries_diff = self.queries_total - self.queries_last_timestep
self.queries_last_timestep = self.queries_total
self.last_time = this_time
self.steps += 1
self.AddResult(self.test_name, this_time - self.start_time, self.steps,
data_store.DB.Size() // 1024, queries_diff, self.subjects,
self.predicates, self.values)
def WriteCSV(self, remove=False):
"""Write results to a CSV file."""
writer = csv.Writer(delimiter=u" ")
writer.WriteRow([
u"Benchmark",
u"Time",
u"DBSize",
u"Queries",
u"Subjects",
u"Predicates",
u"Values",
])
for row in self.scratchpad[2:]:
writer.WriteRow([row[0], row[1], row[3], row[4], row[5], row[6], row[7]])
with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as fp:
fp.write(writer.Content().encode("utf-8"))
logging.info("CSV File is in %s", fp.name)
if remove:
os.unlink(fp.name)
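  # A sketch of what the emitted file looks like (space-delimited; the values
  # below are purely illustrative, not real measurements):
  #
  #   Benchmark Time DBSize Queries Subjects Predicates Values
  #   fill 12.3 20480 3000 2500 5000 10000
  #   "read random 70%" 25.7 21504 3000 2450 4900 9800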
def _RandomlyReadSubject(self, subject, predicates):
"""Read certain parts of a given subject."""
for j, timestamps in iteritems(predicates):
which = self.rand.randint(0, 2)
if which == 0:
# Read all timestamps.
data_store.DB.ResolveMulti(
subject, [self.predicate_template % j],
timestamp=data_store.DB.ALL_TIMESTAMPS)
elif which == 1:
# Read a specific timestamp.
if timestamps:
ts = self.rand.choice(timestamps)
data_store.DB.ResolveMulti(
subject, [self.predicate_template % j], timestamp=(ts, ts))
elif which == 2:
# Read latest.
data_store.DB.Resolve(subject, self.predicate_template % j)
self.Register()
which = self.rand.randint(0, 1)
if which == 0:
# Find all attributes.
data_store.DB.ResolvePrefix(
subject, "task:flow", timestamp=data_store.DB.NEWEST_TIMESTAMP)
elif which == 1:
      # Find all attributes with a prefix-reducible regex.
data_store.DB.ResolvePrefix(
subject, "task:", timestamp=data_store.DB.NEWEST_TIMESTAMP)
self.Register()
def _ReadRandom(self, subjects, fraction, change_test=True):
"""Randomly read the database."""
if change_test:
self.test_name = "read random %d%%" % fraction
for _ in range(0, int(len(subjects) * fraction / 100.0)):
i = self.rand.choice(list(iterkeys(subjects)))
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
self._RandomlyReadSubject(subject, predicates)
def _UpdateRandom(self, subjects, fraction, change_test=True):
"""Update values/predicates for a given fraction of the subjects."""
if change_test:
self.test_name = "update %d%%" % fraction
new_value = os.urandom(100)
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
if self.rand.randint(0, 100) > fraction:
continue
which = self.rand.randint(0, 2)
if which == 0 or which == 1:
for j, timestamp_info in iteritems(predicates):
number_timestamps = len(timestamp_info)
if which == 0 and len(timestamp_info):
# Update one timestamp'ed value.
data_store.DB.Set(
subject,
self.predicate_template % j,
new_value,
timestamp=timestamp_info[-1])
self.Register()
elif which == 1:
# Add another timestamp.
timestamp_info.append(100 * number_timestamps + 1)
data_store.DB.Set(
subject,
self.predicate_template % j,
new_value,
replace=False,
timestamp=timestamp_info[-1])
self.values += 1
self.Register()
elif which == 2:
# Add an extra predicate.
j = len(predicates)
number_timestamps = self.rand.randrange(1, 3)
ts = [100 * (ts + 1) for ts in range(number_timestamps)]
predicates[j] = ts
self.values += number_timestamps
self.predicates += 1
values = [(new_value, t) for t in ts]
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
replace=False,
timestamp=100)
self.Register()
data_store.DB.Flush()
def _DeleteRandom(self, subjects, fraction, change_test=True):
"""Delete predicates/subjects/values at random."""
if change_test:
self.test_name = "delete %d%%" % fraction
subjects_to_delete = []
for i, info in iteritems(subjects):
subject = info["name"]
predicates = info["attrs"]
number_predicates = len(predicates)
do_it = (self.rand.randint(0, 100) <= fraction)
which = self.rand.randint(0, 2)
count_values = 0
predicates_to_delete = []
for j, timestamp_info in iteritems(predicates):
number_timestamps = len(timestamp_info)
count_values += number_timestamps
if do_it:
if which == 0:
# Delete one timestamp'ed value.
if timestamp_info:
ts = timestamp_info[0]
data_store.DB.DeleteAttributes(
subject, [self.predicate_template % j], start=ts, end=ts)
self.values -= 1
timestamp_info.pop(0)
self.Register()
else:
which = 1
if which == 1:
# Delete the attribute itself.
data_store.DB.DeleteAttributes(subject,
[self.predicate_template % j])
self.values -= number_timestamps
self.predicates -= 1
predicates_to_delete.append(j)
self.Register()
if do_it and which == 1:
for j in predicates_to_delete:
del predicates[j]
if do_it and which == 2:
# Delete subject.
data_store.DB.DeleteSubject(subject)
self.predicates -= number_predicates
self.values -= count_values
self.subjects -= 1
subjects_to_delete.append(i)
self.Register()
for i in subjects_to_delete:
del subjects[i]
data_store.DB.Flush()
def _GrowRandomly(self, subjects, fraction, nclients, change_test=True):
"""Adds new clients/subjects to the database."""
if change_test:
self.test_name = "add %d%%" % fraction
how_many = int(len(subjects) * fraction / 100)
new_value = os.urandom(100)
new_subject = max(iteritems(subjects), key=operator.itemgetter(0))[0] + 1
# Generate client names.
clients = [self._GenerateRandomClient() for _ in range(nclients)]
for i in range(new_subject, new_subject + how_many):
client = clients[self.rand.randint(0, nclients - 1)]
self._AddNewSubject(client, subjects, i, new_value)
data_store.DB.Flush()
def _GenerateRandomSubject(self):
n = self.rand.randint(1, 5)
seps = [
self._GenerateRandomString(self.rand.randint(5, 10)) for _ in range(n)
]
return "/".join(seps)
def _AddNewSubject(self, client, subjects, i, value, max_attributes=3):
"""Add a new subject to the database."""
number_predicates = self.rand.randrange(1, max_attributes)
self.subjects += 1
predicates = dict.fromkeys(range(number_predicates))
self.predicates += number_predicates
subject = str(client.Add(self._GenerateRandomSubject()))
for j in range(number_predicates):
number_timestamps = self.rand.randrange(1, 3)
self.values += number_timestamps
ts = [100 * (ts + 1) for ts in range(number_timestamps)]
predicates[j] = ts
values = [(value, t) for t in ts]
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
timestamp=100,
replace=False,
sync=False)
self.Register()
info = {"name": subject, "attrs": predicates}
subjects[i] = info
def _ReadLinear(self, subjects, fraction):
"""Linearly read subjects from the database."""
self.test_name = "read linear %d%%" % fraction
for i in subjects:
if self.rand.randint(0, 100) > fraction:
return
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
self._RandomlyReadSubject(subject, predicates)
def _AddManyAttributes(self, subjects, many):
"""Add lots of predicates to a given number of subjects."""
self.test_name = "add +attrs %d" % many
new_value = os.urandom(100)
for _ in range(0, many):
i = self.rand.choice(list(iterkeys(subjects)))
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
how_many = self.rand.randint(self.BIG_NUM_ATTRIBUTES,
self.BIG_NUM_ATTRIBUTES + 1000)
self.predicates += how_many
new_predicate = max(
iteritems(predicates), key=operator.itemgetter(0))[0] + 1
for j in range(new_predicate, new_predicate + how_many):
number_timestamps = self.rand.randrange(1, 3)
ts = [100 * (ts + 1) for ts in range(number_timestamps)]
self.values += number_timestamps
values = [(new_value, t) for t in ts]
predicates[j] = ts
data_store.DB.MultiSet(
subject, {self.predicate_template % j: values},
replace=False,
timestamp=100,
sync=False)
self.Register()
data_store.DB.Flush()
def _RemoveManyAttributes(self, subjects, fraction):
"""Delete all predicates (except 1) from subjects with many predicates."""
self.test_name = "del +attrs %d%%" % fraction
often = 100 // fraction
count = 0
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
number_predicates = len(predicates)
if number_predicates >= self.BIG_NUM_ATTRIBUTES:
count += 1
if count == often:
count = 0
predicates_to_delete = list(iterkeys(predicates))[1:]
values_deleted = sum(len(predicates[x]) for x in predicates_to_delete)
self.values -= values_deleted
self.predicates -= len(predicates_to_delete)
for j in predicates_to_delete:
del predicates[j]
data_store.DB.DeleteAttributes(
subject, [self.predicate_template % j], sync=False)
self.Register()
data_store.DB.Flush()
def _Wipeout(self, subjects):
"""Delete every subject from the database."""
self.test_name = "wipeout"
for i in subjects:
subject = subjects[i]["name"]
predicates = subjects[i]["attrs"]
number_predicates = len(predicates)
count_values = 0
for j in predicates:
count_values += len(predicates[j])
data_store.DB.DeleteSubject(subject)
self.predicates -= number_predicates
self.values -= count_values
self.subjects -= 1
self.Register()
subjects = {}
data_store.DB.Flush()
def _DoMix(self, subjects):
"""Do a mix of database operations."""
self.test_name = "mix"
for _ in range(0, len(subjects) // 2000):
# Do random operations.
op = self.rand.randint(0, 3)
if op == 0:
self._ReadRandom(subjects, 14, False)
elif op == 1:
self._GrowRandomly(subjects, 5, 20, False)
elif op == 2:
self._UpdateRandom(subjects, 10, False)
elif op == 3:
self._DeleteRandom(subjects, 4, False)
def _GenerateRandomClient(self):
return rdf_client.ClientURN("C.%016d" % self.rand.randint(0, (10**16) - 1))
def _FillDatabase(self, nsubjects, nclients, max_attributes=3):
"""Fill the database with a certain number of subjects and clients."""
self.rand = random.Random(0)
self.test_name = "fill"
self.AddResult(self.test_name, 0, self.steps, data_store.DB.Size(), 0, 0, 0,
0)
subjects = dict.fromkeys(range(nsubjects))
value = os.urandom(100)
clients = [self._GenerateRandomClient() for _ in range(nclients)]
for i in subjects:
client = self.rand.choice(clients)
self._AddNewSubject(client, subjects, i, value, max_attributes)
data_store.DB.Flush()
return subjects
def _GenerateRandomString(self, chars):
return "".join(
[self.rand.choice(string.ascii_letters) for _ in range(chars)])
def _AddBlobs(self, howmany, size):
"""Adds 'howmany' blobs with size 'size' kbs."""
self.test_name = "add blobs %dx%dk" % (howmany, size)
count = 0
often = howmany // 10
for count in range(howmany):
data = self._GenerateRandomString(1024 * size)
      data_store.BLOBS.WriteBlobWithUnknownHash(data)
if count % often == 0:
        # Because adding blobs takes too long, we force the output of new
        # results.
self.Register(force=True)
self.Register(force=True)
data_store.DB.Flush()
@pytest.mark.benchmark
def testManySubjectsFewAttrs(self):
"""Database with many subjects with few attributes."""
subjects = self._FillDatabase(25000, 500)
self._ReadLinear(subjects, 50)
self._UpdateRandom(subjects, 50)
self._ReadRandom(subjects, 70)
self._DeleteRandom(subjects, 40)
self._GrowRandomly(subjects, 40, 50)
self._ReadRandom(subjects, 100)
self._DoMix(subjects)
self._Wipeout(subjects)
@pytest.mark.benchmark
def testManySubjectsFewWithManyAttrs(self):
"""Database where a few subjects have many attributes."""
subjects = self._FillDatabase(25000, 500)
self._UpdateRandom(subjects, 50)
self._AddManyAttributes(subjects, 100)
self._ReadRandom(subjects, 30)
# For 1/2 of the subjects with many attributes, remove all but
# one of the attributes.
self._RemoveManyAttributes(subjects, 50)
self._ReadRandom(subjects, 30)
self._UpdateRandom(subjects, 50)
self._Wipeout(subjects)
@pytest.mark.benchmark
def testFewSubjectsManyAttrs(self):
"""Database with a few subjects with many attributes."""
subjects = self._FillDatabase(100, 5)
self._UpdateRandom(subjects, 100)
self._AddManyAttributes(subjects, 50)
self._ReadRandom(subjects, 30)
self._RemoveManyAttributes(subjects, 50)
self._ReadRandom(subjects, 50)
self._Wipeout(subjects)
@pytest.mark.benchmark
def testBlobs(self):
"""Database that stores blobs of increasing size."""
subjects = self._FillDatabase(10000, 200)
def _ReadUpdate():
self._ReadRandom(subjects, 75)
self._UpdateRandom(subjects, 20)
_ReadUpdate()
self._AddBlobs(50, 512)
_ReadUpdate()
self._AddBlobs(50, 2048)
_ReadUpdate()
self._AddBlobs(50, 10240)
_ReadUpdate()
self._AddBlobs(20, 10240 * 10)
_ReadUpdate()
@pytest.mark.benchmark
def testManySubjectsManyAttrs(self):
"""Database with many subjects with many attributes."""
subjects = self._FillDatabase(25000, 500, 50)
self._ReadLinear(subjects, 50)
self._UpdateRandom(subjects, 50)
self._ReadRandom(subjects, 50)
self._DeleteRandom(subjects, 40)
self._GrowRandomly(subjects, 40, 50)
self._ReadRandom(subjects, 50)
self._DoMix(subjects)
self._Wipeout(subjects)
@pytest.mark.benchmark
class DataStoreBenchmarks(benchmark_test_lib.MicroBenchmarks):
"""Datastore micro benchmarks.
These tests should be run with --labels=benchmark
"""
queue = rdfvalue.RDFURN("BENCHMARK")
units = "s"
def setUp(self):
super(DataStoreBenchmarks, self).setUp()
self.tp = threadpool.ThreadPool.Factory("test_pool", 50)
self.tp.Start()
def tearDown(self):
super(DataStoreBenchmarks, self).tearDown()
self.tp.Stop()
def GenerateFiles(self, client_id, n, directory="dir/dir"):
res = []
for i in range(n):
res.append(
rdf_client_fs.StatEntry(
aff4path="aff4:/%s/fs/os/%s/file%d" % (client_id, directory, i),
st_mode=33261,
st_ino=1026267,
st_dev=51713,
st_nlink=1,
st_uid=0,
st_gid=0,
st_size=60064,
st_atime=1308964274,
st_mtime=1285093975,
st_ctime=1299502221,
st_blocks=128,
st_blksize=4096,
st_rdev=0,
pathspec=rdf_paths.PathSpec(
path="/dir/dir/file%d" % i, pathtype=0)))
return res
def StartFlow(self, client_id):
flow_id = flow.StartAFF4Flow(
client_id=client_id,
flow_name=filesystem.ListDirectory.__name__,
queue=self.queue,
pathspec=rdf_paths.PathSpec(
path="/",
pathtype="OS",
))
self.flow_ids.append(flow_id)
messages = []
for d in range(self.nr_dirs):
messages += self.GenerateFiles(client_id, self.files_per_dir,
"dir/dir%d" % d)
messages.append(rdf_flows.GrrStatus())
with queue_manager.QueueManager() as flow_manager:
for i, payload in enumerate(messages):
msg = rdf_flows.GrrMessage(
session_id=flow_id,
request_id=1,
response_id=1 + i,
auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED,
payload=payload)
if isinstance(payload, rdf_flows.GrrStatus):
msg.type = 1
flow_manager.QueueResponse(msg)
nr_clients = 4
nr_dirs = 4
files_per_dir = 500
def _GenerateRandomString(self, chars):
return "".join(
[self.rand.choice(string.ascii_letters) for _ in range(chars)])
# Constants to control the size of testCollections. These numbers run in a
# reasonable amount of time for a unit test [O(20s)] on most data stores.
RECORDS = 5000
RECORD_SIZE = 1000
READ_COUNT = 50
BIG_READ_SIZE = 25
  # The sequential collection index is only computed for records at least 5m
  # old, so we write records that far in the past in order to force index
  # creation.
INDEX_DELAY = rdfvalue.Duration("10m")
@pytest.mark.benchmark
def testCollections(self):
self.rand = random.Random(42)
#
# Populate and exercise an indexed sequential collection.
#
urn = rdfvalue.RDFURN("aff4:/test_seq_collection")
indexed_collection = StringSequentialCollection(urn)
start_time = time.time()
with data_store.DB.GetMutationPool() as pool:
for _ in range(self.RECORDS):
indexed_collection.Add(
rdfvalue.RDFString(self._GenerateRandomString(self.RECORD_SIZE)),
timestamp=rdfvalue.RDFDatetime.Now() - self.INDEX_DELAY,
mutation_pool=pool)
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. Add (size %d)" % self.RECORD_SIZE, elapsed_time,
self.RECORDS)
start_time = time.time()
self.assertEqual(len(indexed_collection), self.RECORDS)
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. Read to end", elapsed_time, 1)
start_time = time.time()
for _ in range(self.READ_COUNT):
for _ in indexed_collection.GenerateItems(
offset=self.rand.randint(0, self.RECORDS - 1)):
break
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. random 1 record reads", elapsed_time,
self.READ_COUNT)
start_time = time.time()
for _ in range(self.READ_COUNT):
count = 0
for _ in indexed_collection.GenerateItems(
offset=self.rand.randint(0, self.RECORDS - self.BIG_READ_SIZE)):
count += 1
if count >= self.BIG_READ_SIZE:
break
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. random %d record reads" % self.BIG_READ_SIZE,
elapsed_time, self.READ_COUNT)
start_time = time.time()
for _ in indexed_collection.GenerateItems():
pass
elapsed_time = time.time() - start_time
self.AddResult("Seq. Coll. full sequential read", elapsed_time, 1)
@pytest.mark.benchmark
def testSimulateFlows(self):
self.flow_ids = []
self.units = "s"
client_ids = ["C.%016X" % j for j in range(1, self.nr_clients + 1)]
start_time = time.time()
for client_id in client_ids:
self.tp.AddTask(self.StartFlow, (client_id,))
self.tp.Join()
notifications = [
rdf_flows.GrrNotification(session_id=f) for f in self.flow_ids
]
with queue_manager.QueueManager() as manager:
manager.MultiNotifyQueue(notifications)
time_used = time.time() - start_time
self.AddResult(
"Generate Messages (%d clients, %d files)" %
(self.nr_clients, self.nr_dirs * self.files_per_dir), time_used, 1)
my_worker = worker_lib.GRRWorker(queues=[self.queue], token=self.token)
start_time = time.time()
while my_worker.RunOnce():
pass
my_worker.thread_pool.Join()
time_used = time.time() - start_time
self.AddResult("Process Messages", time_used, 1)
@pytest.mark.benchmark
def testMicroBenchmarks(self):
    # Tests run in arbitrary order, but for the benchmarks the order makes a
    # difference, so we call them all from one test here.
self.n = 1000
self.small_n = self.n // 100
self.units = "ms"
self.BenchmarkWriting()
self.BenchmarkReading()
self.BenchmarkWritingThreaded()
self.BenchmarkReadingThreaded()
self.BenchmarkAFF4Locks()
def BenchmarkWriting(self):
subject_template = "aff4:/row%d"
predicate_template = "task:flow%d"
value = os.urandom(100)
large_value = os.urandom(10 * 1024 * 1024)
start_time = time.time()
for i in range(self.n):
data_store.DB.Set(subject_template % i, "task:flow", value)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set rows", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.n):
data_store.DB.Set("aff4:/somerow", predicate_template % i, value)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set attributes", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.n):
data_store.DB.Set("aff4:/somerow", "task:someflow", value, replace=False)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set versions", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.small_n):
data_store.DB.Set(
"aff4:/largerow%d" % i, "task:largeflow", large_value, replace=False)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Set large values", (end_time - start_time) / self.small_n,
self.small_n)
def BenchmarkReading(self):
subject_template = "aff4:/row%d"
predicate_template = "task:flow%d"
start_time = time.time()
for i in range(self.n):
data_store.DB.Resolve(subject_template % i, "task:flow")
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get rows", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.n):
data_store.DB.Resolve("aff4:/somerow", predicate_template % i)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get attributes", (end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.small_n):
data_store.DB.ResolvePrefix(
"aff4:/somerow",
"task:someflow",
timestamp=data_store.DB.ALL_TIMESTAMPS)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get all versions", (end_time - start_time) / self.small_n,
self.small_n)
start_time = time.time()
for i in range(self.small_n):
res = data_store.DB.ResolvePrefix(
"aff4:/largerow%d" % i,
"task:largeflow",
timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertEqual(len(res), 1)
self.assertEqual(len(res[0][1]), 10 * 1024 * 1024)
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Get large values", (end_time - start_time) / self.small_n,
self.small_n)
def BenchmarkWritingThreaded(self):
subject_template = "aff4:/threadedrow%d"
predicate_template = "task:threadedflow%d"
value = os.urandom(100)
large_value = os.urandom(10 * 1024 * 1024)
start_time = time.time()
for i in range(self.n):
self.tp.AddTask(data_store.DB.Set,
(subject_template % i, "task:threadedflow", value, None))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set rows", (end_time - start_time) / self.n,
self.n)
start_time = time.time()
for i in range(self.n):
self.tp.AddTask(
data_store.DB.Set,
("aff4:/somerowthreaded", predicate_template % i, value, None))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set attributes",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.n):
self.tp.AddTask(data_store.DB.Set,
("aff4:/somerowthreaded", "task:someflowthreaded", value,
None, False))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set versions",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.small_n):
self.tp.AddTask(data_store.DB.Set,
("aff4:/threadedlargerow%d" % i, "task:largeflowthreaded",
large_value, None, False))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Set large values",
(end_time - start_time) / self.small_n, self.small_n)
def ResolvePrefixAndCheck(self, subject, predicate, expected_items=1000):
res = data_store.DB.ResolvePrefix(
subject, predicate, timestamp=data_store.DB.ALL_TIMESTAMPS)
self.assertEqual(len(list(res)), expected_items)
def BenchmarkReadingThreaded(self):
subject_template = "aff4:/threadedrow%d"
predicate_template = "task:threadedflow%d"
start_time = time.time()
for i in range(self.n):
self.tp.AddTask(data_store.DB.Resolve,
(subject_template % i, "task:threadedflow"))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get rows", (end_time - start_time) / self.n,
self.n)
start_time = time.time()
for i in range(self.n):
self.tp.AddTask(data_store.DB.Resolve,
("aff4:/somerowthreaded", predicate_template % i))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get attributes",
(end_time - start_time) / self.n, self.n)
start_time = time.time()
for i in range(self.small_n):
self.tp.AddTask(self.ResolvePrefixAndCheck,
("aff4:/somerowthreaded", "task:someflowthreaded"))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get all versions",
(end_time - start_time) / self.small_n, self.small_n)
start_time = time.time()
for i in range(self.small_n):
self.tp.AddTask(
self.ResolvePrefixAndCheck,
("aff4:/threadedlargerow%d" % i, "task:largeflowthreaded", 1))
self.tp.Join()
data_store.DB.Flush()
end_time = time.time()
self.AddResult("Multithreaded: Get large values",
(end_time - start_time) / self.small_n, self.small_n)
def BenchmarkAFF4Locks(self):
client_id = "C.%016X" % 999
# Write some data to read.
client = aff4.FACTORY.Create(client_id, aff4_grr.VFSGRRClient, mode="w")
client.Set(client.Schema.HOSTNAME("client1"))
client.Close()
cl = aff4.FACTORY.Open(client_id)
self.assertEqual(cl.Get(cl.Schema.HOSTNAME), "client1")
# Collect exceptions in threads.
self.fails = []
def Thread():
try:
# Using blocking_lock_timeout of 10 minutes to avoid possible
# timeouts when running tests on slow hardware.
with aff4.FACTORY.OpenWithLock(
client_id,
blocking=True,
blocking_sleep_interval=0.2,
blocking_lock_timeout=600) as client:
self.assertEqual(client.Get(client.Schema.HOSTNAME), "client1")
except Exception as e: # pylint: disable=broad-except
self.fails.append(e)
start_time = time.time()
for _ in range(self.n):
Thread()
end_time = time.time()
self.AddResult("OpenWithLock", (end_time - start_time) / self.n, self.n)
self.assertEqual(len(self.fails), 0)
start_time = time.time()
for _ in range(self.n):
self.tp.AddTask(Thread, ())
self.tp.Join()
end_time = time.time()
self.AddResult("Multithreaded: OpenWithLock",
(end_time - start_time) / self.n, self.n)
self.assertEqual(len(self.fails), 0)
| 34.2136 | 80 | 0.648982 |
58697a00667266180fca70d1ef1cd183ba5d0c1f | 40,604 | py | Python | boto/codedeploy/layer1.py | ContextLogic/boto | 108d3c653f0a3c20794da930eee9df1b27774657 | [
"MIT"
] | null | null | null | boto/codedeploy/layer1.py | ContextLogic/boto | 108d3c653f0a3c20794da930eee9df1b27774657 | [
"MIT"
] | 3 | 2020-01-27T22:40:44.000Z | 2020-10-06T16:22:11.000Z | boto/codedeploy/layer1.py | ContextLogic/boto | 108d3c653f0a3c20794da930eee9df1b27774657 | [
"MIT"
] | 2 | 2020-05-14T05:59:29.000Z | 2020-05-14T07:27:31.000Z | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.codedeploy import exceptions
class CodeDeployConnection(AWSQueryConnection):
"""
AWS CodeDeploy **Overview**
This is the AWS CodeDeploy API Reference. This guide provides
descriptions of the AWS CodeDeploy APIs. For additional
information, see the `AWS CodeDeploy User Guide`_.
**Using the APIs**
You can use the AWS CodeDeploy APIs to work with the following
items:
+ Applications , which are unique identifiers that AWS CodeDeploy
uses to ensure that the correct combinations of revisions,
deployment configurations, and deployment groups are being
referenced during deployments. You can work with applications by
calling CreateApplication, DeleteApplication, GetApplication,
ListApplications, BatchGetApplications, and UpdateApplication to
create, delete, and get information about applications, and to
change information about an application, respectively.
+ Deployment configurations , which are sets of deployment rules
and deployment success and failure conditions that AWS CodeDeploy
uses during deployments. You can work with deployment
configurations by calling CreateDeploymentConfig,
DeleteDeploymentConfig, GetDeploymentConfig, and
ListDeploymentConfigs to create, delete, and get information about
deployment configurations, respectively.
+ Deployment groups , which represent groups of Amazon EC2
instances to which application revisions can be deployed. You can
work with deployment groups by calling CreateDeploymentGroup,
DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups,
and UpdateDeploymentGroup to create, delete, and get information
about single and multiple deployment groups, and to change
information about a deployment group, respectively.
+ Deployment instances (also known simply as instances ), which
represent Amazon EC2 instances to which application revisions are
deployed. Deployment instances are identified by their Amazon EC2
tags or Auto Scaling group names. Deployment instances belong to
deployment groups. You can work with deployment instances by
calling GetDeploymentInstance and ListDeploymentInstances to get
information about single and multiple deployment instances,
respectively.
+ Deployments , which represent the process of deploying revisions
to deployment groups. You can work with deployments by calling
CreateDeployment, GetDeployment, ListDeployments,
BatchGetDeployments, and StopDeployment to create and get
information about deployments, and to stop a deployment,
respectively.
+ Application revisions (also known simply as revisions ), which
are archive files that are stored in Amazon S3 buckets or GitHub
repositories. These revisions contain source content (such as
source code, web pages, executable files, any deployment scripts,
and similar) along with an Application Specification file (AppSpec
file). (The AppSpec file is unique to AWS CodeDeploy; it defines a
series of deployment actions that you want AWS CodeDeploy to
execute.) An application revision is uniquely identified by its
Amazon S3 object key and its ETag, version, or both. Application
revisions are deployed to deployment groups. You can work with
application revisions by calling GetApplicationRevision,
ListApplicationRevisions, and RegisterApplicationRevision to get
information about application revisions and to inform AWS
CodeDeploy about an application revision, respectively.
"""
APIVersion = "2014-10-06"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com"
ServiceName = "codedeploy"
TargetPrefix = "CodeDeploy_20141006"
ResponseError = JSONResponseError
_faults = {
"InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException,
"InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException,
"DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException,
"InvalidRoleException": exceptions.InvalidRoleException,
"RoleRequiredException": exceptions.RoleRequiredException,
"DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException,
"DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException,
"InvalidSortByException": exceptions.InvalidSortByException,
"InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException,
"InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException,
"ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException,
"ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException,
"InvalidEC2TagException": exceptions.InvalidEC2TagException,
"DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException,
"DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException,
"InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException,
"RevisionRequiredException": exceptions.RevisionRequiredException,
"InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException,
"DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException,
"DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException,
"DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException,
"DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException,
"RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException,
"DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException,
"DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException,
"DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException,
"BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException,
"InvalidRevisionException": exceptions.InvalidRevisionException,
"InvalidSortOrderException": exceptions.InvalidSortOrderException,
"InvalidOperationException": exceptions.InvalidOperationException,
"InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException,
"InvalidApplicationNameException": exceptions.InvalidApplicationNameException,
"DescriptionTooLongException": exceptions.DescriptionTooLongException,
"ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException,
"InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException,
"DeploymentNotStartedException": exceptions.DeploymentNotStartedException,
"DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException,
"InstanceIdRequiredException": exceptions.InstanceIdRequiredException,
"InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException,
"InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CodeDeployConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
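    # A minimal construction sketch (the credentials shown are placeholders;
    # other AWSQueryConnection keyword arguments pass straight through). The
    # region defaults to us-east-1 as defined above:
    #
    #   from boto.codedeploy.layer1 import CodeDeployConnection
    #   conn = CodeDeployConnection(aws_access_key_id='AKIA...',
    #                               aws_secret_access_key='...')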
def batch_get_applications(self, application_names=None):
"""
Gets information about one or more applications.
:type application_names: list
:param application_names: A list of application names, with multiple
application names separated by spaces.
"""
params = {}
if application_names is not None:
params['applicationNames'] = application_names
return self.make_request(action='BatchGetApplications',
body=json.dumps(params))
def batch_get_deployments(self, deployment_ids=None):
"""
Gets information about one or more deployments.
:type deployment_ids: list
:param deployment_ids: A list of deployment IDs, with multiple
deployment IDs separated by spaces.
"""
params = {}
if deployment_ids is not None:
params['deploymentIds'] = deployment_ids
return self.make_request(action='BatchGetDeployments',
body=json.dumps(params))
def create_application(self, application_name):
"""
Creates a new application.
:type application_name: string
:param application_name: The name of the application. This name must be
unique within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='CreateApplication',
body=json.dumps(params))
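    # Example (sketch; the application name is hypothetical). The call returns
    # the parsed JSON response from the service:
    #
    #   conn.create_application('MyApp')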
def create_deployment(self, application_name, deployment_group_name=None,
revision=None, deployment_config_name=None,
description=None,
ignore_application_stop_failures=None):
"""
Deploys an application revision to the specified deployment
group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The deployment group's name.
:type revision: dict
:param revision: The type of revision to deploy, along with information
about the revision's location.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
If not specified, the value configured in the deployment group will be
used as the default. If the deployment group does not have a
deployment configuration associated with it, then
CodeDeployDefault.OneAtATime will be used by default.
:type description: string
:param description: A comment about the deployment.
:type ignore_application_stop_failures: boolean
:param ignore_application_stop_failures: If set to true, then if the
deployment causes the ApplicationStop deployment lifecycle event to
fail to a specific instance, the deployment will not be considered
to have failed to that instance at that point and will continue on
to the BeforeInstall deployment lifecycle event.
If set to false or not specified, then if the deployment causes the
ApplicationStop deployment lifecycle event to fail to a specific
instance, the deployment will stop to that instance, and the
deployment to that instance will be considered to have failed.
"""
params = {'applicationName': application_name, }
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if revision is not None:
params['revision'] = revision
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if description is not None:
params['description'] = description
if ignore_application_stop_failures is not None:
params['ignoreApplicationStopFailures'] = ignore_application_stop_failures
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
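    # Usage sketch. The revision dict mirrors the structure AWS CodeDeploy
    # expects for an S3-hosted bundle (a revisionType plus an s3Location);
    # application, group, bucket and key names here are hypothetical:
    #
    #   conn.create_deployment(
    #       'MyApp',
    #       deployment_group_name='MyApp-fleet',
    #       revision={
    #           'revisionType': 'S3',
    #           's3Location': {
    #               'bucket': 'my-deploy-bucket',
    #               'key': 'myapp-rev1.zip',
    #               'bundleType': 'zip',
    #           },
    #       },
    #       deployment_config_name='CodeDeployDefault.OneAtATime',
    #       description='Initial deployment')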
def create_deployment_config(self, deployment_config_name,
minimum_healthy_hosts=None):
"""
Creates a new deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of the deployment configuration
to create.
:type minimum_healthy_hosts: dict
:param minimum_healthy_hosts: The minimum number of healthy instances
that should be available at any time during the deployment. There
are two parameters expected in the input: type and value.
The type parameter takes either of the following values:
+ HOST_COUNT: The value parameter represents the minimum number of
healthy instances, as an absolute value.
+ FLEET_PERCENT: The value parameter represents the minimum number of
healthy instances, as a percentage of the total number of instances
in the deployment. If you specify FLEET_PERCENT, then at the start
of the deployment AWS CodeDeploy converts the percentage to the
equivalent number of instances and rounds fractional instances up.
The value parameter takes an integer.
For example, to set a minimum of 95% healthy instances, specify a type
of FLEET_PERCENT and a value of 95.
"""
params = {'deploymentConfigName': deployment_config_name, }
if minimum_healthy_hosts is not None:
params['minimumHealthyHosts'] = minimum_healthy_hosts
return self.make_request(action='CreateDeploymentConfig',
body=json.dumps(params))
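    # Example, taken directly from the description above: keep at least 95% of
    # the instances healthy during a deployment (the configuration name is
    # hypothetical):
    #
    #   conn.create_deployment_config(
    #       'NinetyFivePercentHealthy',
    #       minimum_healthy_hosts={'type': 'FLEET_PERCENT', 'value': 95})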
def create_deployment_group(self, application_name,
deployment_group_name,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Creates a new deployment group for application revisions to be
deployed to.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type deployment_config_name: string
:param deployment_config_name: If specified, the deployment
configuration name must be one of the predefined values, or it can
be a custom deployment configuration:
+ CodeDeployDefault.AllAtOnce deploys an application revision to up to
all of the Amazon EC2 instances at once. The overall deployment
succeeds if the application revision deploys to at least one of the
instances. The overall deployment fails after the application
revision fails to deploy to all of the instances. For example, for
9 instances, deploy to up to all 9 instances at once. The overall
deployment succeeds if any of the 9 instances is successfully
deployed to, and it fails if all 9 instances fail to be deployed
to.
+ CodeDeployDefault.HalfAtATime deploys to up to half of the instances
at a time (with fractions rounded down). The overall deployment
succeeds if the application revision deploys to at least half of
the instances (with fractions rounded up); otherwise, the
deployment fails. For example, for 9 instances, deploy to up to 4
instances at a time. The overall deployment succeeds if 5 or more
instances are successfully deployed to; otherwise, the deployment
fails. Note that the deployment may successfully deploy to some
instances, even if the overall deployment fails.
+ CodeDeployDefault.OneAtATime deploys the application revision to only
one of the instances at a time. The overall deployment succeeds if
the application revision deploys to all of the instances. The
overall deployment fails after the application revision first fails
to deploy to any one instance. For example, for 9 instances, deploy
to one instance at a time. The overall deployment succeeds if all 9
instances are successfully deployed to, and it fails if any of one
of the 9 instances fail to be deployed to. Note that the deployment
may successfully deploy to some instances, even if the overall
deployment fails. This is the default deployment configuration if a
configuration isn't specified for either the deployment or the
deployment group.
To create a custom deployment configuration, call the create deployment
configuration operation.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The Amazon EC2 tags to filter on.
:type auto_scaling_groups: list
:param auto_scaling_groups: A list of associated Auto Scaling groups.
:type service_role_arn: string
:param service_role_arn: A service role ARN that allows AWS CodeDeploy
to act on the user's behalf when interacting with AWS services.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='CreateDeploymentGroup',
body=json.dumps(params))
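    # Usage sketch (the group name, Auto Scaling group and role ARN are
    # hypothetical; the role must allow AWS CodeDeploy to act on the account's
    # behalf, as described above):
    #
    #   conn.create_deployment_group(
    #       'MyApp', 'MyApp-fleet',
    #       deployment_config_name='CodeDeployDefault.HalfAtATime',
    #       auto_scaling_groups=['myapp-asg'],
    #       service_role_arn='arn:aws:iam::123456789012:role/CodeDeployRole')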
def delete_application(self, application_name):
"""
Deletes an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='DeleteApplication',
body=json.dumps(params))
def delete_deployment_config(self, deployment_config_name):
"""
Deletes a deployment configuration.
A deployment configuration cannot be deleted if it is
currently in use. Also, predefined configurations cannot be
deleted.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='DeleteDeploymentConfig',
body=json.dumps(params))
def delete_deployment_group(self, application_name,
deployment_group_name):
"""
Deletes a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='DeleteDeploymentGroup',
body=json.dumps(params))
def get_application(self, application_name):
"""
Gets information about an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='GetApplication',
body=json.dumps(params))
def get_application_revision(self, application_name, revision):
"""
Gets information about an application revision.
:type application_name: string
:param application_name: The name of the application that corresponds
to the revision.
:type revision: dict
:param revision: Information about the application revision to get,
including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
return self.make_request(action='GetApplicationRevision',
body=json.dumps(params))
def get_deployment(self, deployment_id):
"""
Gets information about a deployment.
:type deployment_id: string
:param deployment_id: An existing deployment ID within the AWS user
account.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='GetDeployment',
body=json.dumps(params))
def get_deployment_config(self, deployment_config_name):
"""
Gets information about a deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='GetDeploymentConfig',
body=json.dumps(params))
def get_deployment_group(self, application_name, deployment_group_name):
"""
Gets information about a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='GetDeploymentGroup',
body=json.dumps(params))
def get_deployment_instance(self, deployment_id, instance_id):
"""
Gets information about an Amazon EC2 instance as part of a
deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type instance_id: string
:param instance_id: The unique ID of an Amazon EC2 instance in the
deployment's deployment group.
"""
params = {
'deploymentId': deployment_id,
'instanceId': instance_id,
}
return self.make_request(action='GetDeploymentInstance',
body=json.dumps(params))
def list_application_revisions(self, application_name, sort_by=None,
sort_order=None, s_3_bucket=None,
s_3_key_prefix=None, deployed=None,
next_token=None):
"""
Lists information about revisions for an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type sort_by: string
:param sort_by: The column name to sort the list results by:
+ registerTime: Sort the list results by when the revisions were
registered with AWS CodeDeploy.
+ firstUsedTime: Sort the list results by when the revisions were first
            used in a deployment.
+ lastUsedTime: Sort the list results by when the revisions were last
used in a deployment.
If not specified or set to null, the results will be returned in an
arbitrary order.
:type sort_order: string
:param sort_order: The order to sort the list results by:
+ ascending: Sort the list results in ascending order.
+ descending: Sort the list results in descending order.
If not specified, the results will be sorted in ascending order.
If set to null, the results will be sorted in an arbitrary order.
:type s_3_bucket: string
:param s_3_bucket: A specific Amazon S3 bucket name to limit the search
for revisions.
If set to null, then all of the user's buckets will be searched.
:type s_3_key_prefix: string
:param s_3_key_prefix: A specific key prefix for the set of Amazon S3
objects to limit the search for revisions.
:type deployed: string
:param deployed:
Whether to list revisions based on whether the revision is the target
        revision of a deployment group:
+ include: List revisions that are target revisions of a deployment
group.
+ exclude: Do not list revisions that are target revisions of a
deployment group.
+ ignore: List all revisions, regardless of whether they are target
revisions of a deployment group.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list application revisions call, which can be used to return the
next set of applications in the list.
"""
params = {'applicationName': application_name, }
if sort_by is not None:
params['sortBy'] = sort_by
if sort_order is not None:
params['sortOrder'] = sort_order
if s_3_bucket is not None:
params['s3Bucket'] = s_3_bucket
if s_3_key_prefix is not None:
params['s3KeyPrefix'] = s_3_key_prefix
if deployed is not None:
params['deployed'] = deployed
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplicationRevisions',
body=json.dumps(params))
def list_applications(self, next_token=None):
"""
Lists the applications registered within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list applications call, which can be used to return the next set of
applications in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplications',
body=json.dumps(params))
def list_deployment_configs(self, next_token=None):
"""
Lists the deployment configurations within the AWS user
account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment configurations call, which can be used to return
the next set of deployment configurations in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentConfigs',
body=json.dumps(params))
def list_deployment_groups(self, application_name, next_token=None):
"""
Lists the deployment groups for an application registered
within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment groups call, which can be used to return the next
set of deployment groups in the list.
"""
params = {'applicationName': application_name, }
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentGroups',
body=json.dumps(params))
def list_deployment_instances(self, deployment_id, next_token=None,
instance_status_filter=None):
"""
Lists the Amazon EC2 instances for a deployment within the AWS
user account.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment instances call, which can be used to return the
next set of deployment instances in the list.
:type instance_status_filter: list
:param instance_status_filter:
A subset of instances to list, by status:
+ Pending: Include in the resulting list those instances with pending
deployments.
+ InProgress: Include in the resulting list those instances with in-
progress deployments.
+ Succeeded: Include in the resulting list those instances with
succeeded deployments.
+ Failed: Include in the resulting list those instances with failed
deployments.
+ Skipped: Include in the resulting list those instances with skipped
deployments.
+ Unknown: Include in the resulting list those instances with
deployments in an unknown state.
"""
params = {'deploymentId': deployment_id, }
if next_token is not None:
params['nextToken'] = next_token
if instance_status_filter is not None:
params['instanceStatusFilter'] = instance_status_filter
return self.make_request(action='ListDeploymentInstances',
body=json.dumps(params))
def list_deployments(self, application_name=None,
deployment_group_name=None,
include_only_statuses=None, create_time_range=None,
next_token=None):
"""
Lists the deployments under a deployment group for an
application registered within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type include_only_statuses: list
:param include_only_statuses: A subset of deployments to list, by
status:
+ Created: Include in the resulting list created deployments.
+ Queued: Include in the resulting list queued deployments.
+ In Progress: Include in the resulting list in-progress deployments.
+ Succeeded: Include in the resulting list succeeded deployments.
+ Failed: Include in the resulting list failed deployments.
+ Aborted: Include in the resulting list aborted deployments.
:type create_time_range: dict
:param create_time_range: A deployment creation start- and end-time
range for returning a subset of the list of deployments.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployments call, which can be used to return the next set of
deployments in the list.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if include_only_statuses is not None:
params['includeOnlyStatuses'] = include_only_statuses
if create_time_range is not None:
params['createTimeRange'] = create_time_range
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeployments',
body=json.dumps(params))
def register_application_revision(self, application_name, revision,
description=None):
"""
Registers with AWS CodeDeploy a revision for the specified
application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type description: string
:param description: A comment about the revision.
:type revision: dict
:param revision: Information about the application revision to
register, including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
if description is not None:
params['description'] = description
return self.make_request(action='RegisterApplicationRevision',
body=json.dumps(params))
def stop_deployment(self, deployment_id):
"""
Attempts to stop an ongoing deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='StopDeployment',
body=json.dumps(params))
def update_application(self, application_name=None,
new_application_name=None):
"""
Changes an existing application's name.
:type application_name: string
:param application_name: The current name of the application that you
want to change.
:type new_application_name: string
:param new_application_name: The new name that you want to change the
application to.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if new_application_name is not None:
params['newApplicationName'] = new_application_name
return self.make_request(action='UpdateApplication',
body=json.dumps(params))
def update_deployment_group(self, application_name,
current_deployment_group_name,
new_deployment_group_name=None,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Changes information about an existing deployment group.
:type application_name: string
:param application_name: The application name corresponding to the
deployment group to update.
:type current_deployment_group_name: string
:param current_deployment_group_name: The current name of the existing
deployment group.
:type new_deployment_group_name: string
:param new_deployment_group_name: The new name of the deployment group,
if you want to change it.
:type deployment_config_name: string
:param deployment_config_name: The replacement deployment configuration
name to use, if you want to change it.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The replacement set of Amazon EC2 tags to
filter on, if you want to change them.
:type auto_scaling_groups: list
:param auto_scaling_groups: The replacement list of Auto Scaling groups
to be included in the deployment group, if you want to change them.
:type service_role_arn: string
:param service_role_arn: A replacement service role's ARN, if you want
to change it.
"""
params = {
'applicationName': application_name,
'currentDeploymentGroupName': current_deployment_group_name,
}
if new_deployment_group_name is not None:
params['newDeploymentGroupName'] = new_deployment_group_name
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='UpdateDeploymentGroup',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
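# --- Usage sketch (illustrative only; not part of the original module) ------
# A minimal, hedged example of driving the methods above, assuming `conn` is an
# instance of this connection class created with valid AWS credentials and that
# an application named 'MyApp' already exists; 'MyApp', 'MyGroup' and the role
# ARN below are placeholders, not values taken from this codebase.
#
#     conn.create_deployment_group(
#         application_name='MyApp',
#         deployment_group_name='MyGroup',
#         deployment_config_name='CodeDeployDefault.OneAtATime',
#         service_role_arn='arn:aws:iam::111122223333:role/CodeDeployServiceRole')
#     print(conn.list_deployment_groups(application_name='MyApp'))
#     print(conn.list_deployments(application_name='MyApp',
#                                 deployment_group_name='MyGroup'))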
| 45.115556 | 102 | 0.667274 |
a4286b08898ef3f4b1bed01ab3a4ee36c61fc371 | 24 | py | Python | c/PBP_net/__init__.py | jackonelli/Probabilistic-Backpropagation | ee20b7fc917f82c7198e1c6e3b9a3ef88c436014 | [
"BSD-3-Clause"
] | null | null | null | c/PBP_net/__init__.py | jackonelli/Probabilistic-Backpropagation | ee20b7fc917f82c7198e1c6e3b9a3ef88c436014 | [
"BSD-3-Clause"
] | null | null | null | c/PBP_net/__init__.py | jackonelli/Probabilistic-Backpropagation | ee20b7fc917f82c7198e1c6e3b9a3ef88c436014 | [
"BSD-3-Clause"
] | null | null | null | from PBP_net import pbp
| 12 | 23 | 0.833333 |
204eb95fb180ac22b2803a1b3501bee0fd13b14b | 4,423 | py | Python | .history/classes/Menu_20171107144533.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107144533.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | .history/classes/Menu_20171107144533.py | reecebenson/DADSA-Tennis-PartA | d0763f819b300fcd0ce27041f5bc4ef0519c00bf | [
"MIT"
] | null | null | null | # DADSA - Assignment 1
# Reece Benson
from os import system as call
from collections import OrderedDict
class Menu():
# Define the variables we will be using
_app = None
_menu = None
_current_menu = 0
def __init__(self, app):
# Set our Application
self._app = app
def load(self):
# Define our Menu
self._menu = OrderedDict()
# Main Menu
self._menu["main"] = OrderedDict([("New Season", "new_season"), ("Load Season", "load_season")])
# New Season Menu
self._menu["new_season"] = OrderedDict([("Players", "ns_players"), ("Tournaments", "ns_tournaments"), ("Prize Money", "ns_prizemoney"), ("Difficulty", "ns_difficulty")])
# Load Season Menu
self._menu["load_season"] = OrderedDict()
# Append our Seasons to the "Load Season" Menu
for seasonId in self._app.handler.get_seasons():
season = self._app.handler.get_season(seasonId)
self._menu["load_season"].update({ season.name(): "load_season_"+str(seasonId) })
# Display our Menu
self.display()
def display(self, index = None):
# Clear our terminal window
call("cls")
# Define our variables
cur_count = 0
menu_item = self.get_menu(index or self.get_current_menu_index())
# Menu Title, set tree
tree = "(current: " + self.get_menu_name(self.get_current_menu_index()) + ")"
print("Please select an option: {}".format(tree))
menu_counter = 0
for m in self._menu[menu_item]:
# Increase our Counter
menu_counter += 1
# Is the Menu Item a Function?
m_type = None
if(callable(m)): m_type = ""
else: m_type = "->"
# Print our Menu Item
print("{0}. {1} {2}".format(menu_counter, m, m_type))
# Get User Input
self.get_input()
def get_current_menu_index(self):
return self._current_menu
def set_current_menu_index(self, new_index):
self._current_menu = new_index
def get_menu_name(self, index):
return [ (v) for k,v in enumerate(self._menu) if(k == index) ][0]
def get_menu(self, index):
menu_item = self.get_menu_name(index)
return menu_item
def get_input(self):
# Wrap this in a try/except to validate any errors with input
# Get User's Input
resp = input(">>> ")
        # Validate user's input (input() always returns a string, so check for digits)
        if(resp.isdigit()):
            print("int")
        else:
            print("Invalid option:", resp)
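        # NOTE: the block below appears to be carried over from an earlier
        # revision of this class (this file sits in an editor .history folder);
        # it references names (debug, curMenu, self.handler, self.options,
        # self.loadMenu, self.shouldExit, sys) that are not defined in this file.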
try:
m = input('>>> ')
if(m == "exit"):
raise KeyboardInterrupt
elif(m == ""):
return self.get_input()
try:
if(debug):
print("Entered: {0} on curMenu {1}".format(m, curMenu))
# Store our selected season
if(curMenu == "view_season" or curMenu == "emulate_season"):
seasons = self.handler.getSeasons()
if((int(m)-1) in seasons):
self.selectedSeason = int(m)-1
# Get Key by ID
menus = self.getOtherMenus(curMenu)
# Convert Index to integer
selected_index = int(m)-1
# Check out index is not out of scope
if(selected_index < 0 or selected_index >= len(self.options[curMenu])):
raise IndexError()
else:
menu = menus[selected_index][0]
# Check if the object found is a method or another menu
if(callable(self.options[menu])):
self.options[menu]()
else:
self.loadMenu(menu)
except KeyError:
self.loadMenu(curMenu, True, m)
except IndexError:
self.loadMenu(curMenu, True, "{0} is not a valid option.".format(m))
except Exception as e:
if(m == "exit"):
sys.exit()
else:
self.loadMenu(curMenu, True, str(e))
except KeyboardInterrupt:
self.shouldExit()
def load_action(self, menu_id):
#TODO: Load Action from Menu_ID
print("Load Action") | 31.592857 | 177 | 0.5286 |
af23e8a9665a0c8f0eca3cbbb6d262296c1596ce | 8,393 | py | Python | docs/conf.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 2 | 2016-12-04T04:32:34.000Z | 2019-04-18T09:38:33.000Z | docs/conf.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 4 | 2017-05-24T01:37:48.000Z | 2018-04-09T02:35:25.000Z | docs/conf.py | Alveo/pyalveo | 1e9eec22bc031bc9a08066f9966565a546e6242e | [
"BSD-3-Clause"
] | 2 | 2016-11-21T03:49:43.000Z | 2017-10-05T04:08:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this file
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pyalveo
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyalveo'
copyright = u'2014, Steve Cassidy'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pyalveo.__version__
# The full version, including alpha/beta/rc tags.
release = pyalveo.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyalveodoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pyalveo.tex',
u'Pyalveo Documentation',
u'Steve Cassidy', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyalveo',
u'Pyalveo Documentation',
[u'Steve Cassidy'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyalveo',
u'Pyalveo Documentation',
u'Steve Cassidy',
'pyalveo',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | 30.52 | 76 | 0.715358 |
b7375343672d555258ab79a1080180f27c6682db | 9,997 | py | Python | fuzz_lightyear/request.py | tanx16/fuzz-lightyear | 3b311686797d9eda0eed5b8d4831b70d2ab10221 | [
"Apache-2.0"
] | null | null | null | fuzz_lightyear/request.py | tanx16/fuzz-lightyear | 3b311686797d9eda0eed5b8d4831b70d2ab10221 | [
"Apache-2.0"
] | null | null | null | fuzz_lightyear/request.py | tanx16/fuzz-lightyear | 3b311686797d9eda0eed5b8d4831b70d2ab10221 | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from functools import lru_cache
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from urllib.parse import quote_plus
from urllib.parse import urlencode
from bravado.client import CallableOperation
from bravado_core.param import get_param_type_spec # type: ignore
from cached_property import cached_property # type: ignore
from hypothesis.searchstrategy.strategies import SearchStrategy
from .datastore import get_post_fuzz_hooks
from .fuzzer import fuzz_parameters
from .output.logging import log
from .output.util import print_warning
from .supplements.abstraction import get_abstraction
COLLECTION_FORMAT_CHARS = {
'csv': ',',
'tsv': '\t',
'pipes': '|',
'ssv': ' ',
}
class FuzzingRequest:
def __init__(
self,
operation_id: str,
tag: str = 'default',
**kwargs: Any,
) -> None:
"""
:param operation_id: unique identifier for each Swagger operation.
:param tag: this is how Swagger operations are grouped.
"""
self.tag = tag
self.operation_id = operation_id
self.fuzzed_input = kwargs # type: Optional[Dict[str, Any]]
if not self.fuzzed_input:
self.fuzzed_input = None
# This SearchStrategy should be generated with hypothesis' `fixed_dictionaries`,
# mapping keys to SearchStrategy.
self._fuzzed_input_factory = None # type: Optional[SearchStrategy]
@property
def id(self) -> str:
return '{}.{}'.format(
self.tag,
self.operation_id,
)
def _encode_array_in_path(
self,
fuzzed_input: List,
collection_format: str,
) -> str:
separator = quote_plus(COLLECTION_FORMAT_CHARS[collection_format])
return separator.join([str(i) for i in fuzzed_input])
def json(self) -> Dict[str, Any]:
path = self._swagger_operation.path_name # type: str
params = defaultdict(dict) # type: Dict[str, Dict[str, Any]]
if self.fuzzed_input:
for key, value in self._swagger_operation.params.items():
if key not in self.fuzzed_input:
continue
if value.location == 'path':
if value.param_spec['type'] == 'array':
fuzzed_input = self._encode_array_in_path(
self.fuzzed_input[key],
value.param_spec.get('collectionFormat', 'csv'),
)
else:
fuzzed_input = str(self.fuzzed_input[key])
path = path.replace(
f'{{{key}}}',
fuzzed_input,
)
else:
params[value.location][key] = self.fuzzed_input[key]
return {
'method': self._swagger_operation.http_method.upper(),
'path': path,
**params,
}
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.tag}.{self.operation_id})'
def __str__(self) -> str:
data = self.json()
url = (
f'{self._swagger_operation.swagger_spec.api_url.rstrip("/")}'
f'{data["path"]}'
)
if 'query' in data:
url += '?'
for key, value in data['query'].items():
if not isinstance(value, list):
# NOTE: value should not be a dict, for a query param.
value = [value]
for v in value:
url += f'{key}={quote_plus(str(v).encode())}&'
url = url.rstrip('&')
args = []
if 'formData' in data:
args.append(f'--data \'{urlencode(data["formData"])}\'')
if 'header' in data:
for key, value in data['header'].items():
args.append(f'-H \'{key}: {value}\'')
return f'curl -X {data["method"]} {url} {" ".join(args)}'.rstrip()
def send(
self,
auth: Optional[Dict[str, Any]] = None,
*args: Any,
should_log: bool = True,
data: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""
:param auth: parameters to pass to abstracted request method to specify
the user making the request.
:param should_log: this should only be false, if we're sending a
duplicate request as part of a plugin.
"""
if not data:
data = {}
# Empty dictionary means we're not sending parameters.
if self.fuzzed_input is None:
self.fuzzed_input = self.fuzz(data)
self.apply_post_fuzz_hooks(self.fuzzed_input)
if not auth:
auth = get_victim_session_factory()()
if should_log:
log.info(str(self))
_merge_auth_headers(self.fuzzed_input, auth)
# auth details should override fuzzed_input, because
# specifics should always override randomly generated content
kwargs = _merge_kwargs(self.fuzzed_input, auth, kwargs)
return get_abstraction().request_method(
operation_id=self.operation_id,
tag=self.tag,
*args,
**kwargs,
)
def fuzz(self, existing_data: Dict[str, Any]) -> Dict[str, Any]:
"""Returns a dictionary of values which can be used
to call the operation being fuzzed.
"""
if not self._fuzzed_input_factory:
parameters = []
for name, param in self._swagger_operation.params.items():
specification = get_param_type_spec(param).copy()
if param.location == 'body':
# For 'body' parameters, bravado discards information from the
# param spec itself. We pass in the 'required' parameter in this
# case.
# For the 'name' argument (seeing that body parameters can be
# named differently), we pass it in separately as it breaks the
# swagger specification if we group it together.
specification['required'] = param.required
parameters.append((name, specification,))
self._fuzzed_input_factory = fuzz_parameters(parameters)
# NOTE: If we were really worried about performance later on,
# we might be able to address this. Specifically, we don't
# *need* to generate examples, just to throw it away later
# if the key is already in data.
# However, this involves parameter modification, which may
# require a more involved change.
fuzzed_input = {}
for key, value in self._fuzzed_input_factory.example().items():
if key in existing_data:
fuzzed_input[key] = existing_data[key]
continue
if value is not None:
fuzzed_input[key] = value
return fuzzed_input
def apply_post_fuzz_hooks(self, fuzzed_input: Dict[str, Any]) -> None:
"""After parameters for a request are fuzzed, this function
applies developer-supplied post-fuzz hooks to the fuzzed
input.
:param fuzzed_input: The initial fuzz result from `self.fuzz`.
"""
hooks = get_post_fuzz_hooks(self.operation_id, self.tag)
for hook in hooks:
hook(
self._swagger_operation,
fuzzed_input,
)
@cached_property # type: ignore
def _swagger_operation(self) -> CallableOperation:
return cast(
CallableOperation,
getattr(
getattr(get_abstraction().client, self.tag),
self.operation_id,
),
)
@lru_cache(maxsize=1)
def get_victim_session_factory() -> Callable[..., Dict[str, Any]]:
factory = get_abstraction().get_victim_session
if factory:
return factory
print_warning('No auth method specified.')
return lambda: {}
def _merge_auth_headers(fuzzed_params: Dict[str, Any], auth: Dict[str, Any]) -> None:
"""
The underlying Bravado client allows us to specify request headers on a per-request
basis (https://bravado.readthedocs.io/en/stable/configuration.html#per-request-configuration). # noqa: E501
However, when there are authorization headers specified by the Swagger specification,
the Bravado library parses it and merges it within the parameters required for the
callable operation.
    This means our fuzzing engine will set a value for it, which will override the
manually specified authorization headers. To address this fact, we replace the fuzzed
header with the actual one.
"""
if not auth.get('_request_options', {}).get('headers', None):
return
for key in auth['_request_options']['headers']:
# It looks like Bravado does some serialization for Python purposes, so we need
# to mirror this.
key = key.replace('-', '_')
if key in fuzzed_params:
fuzzed_params.pop(key)
def _merge_kwargs(*args: Any) -> Dict[str, Any]:
"""Merges the input dictionaries into a single dictionary which
can be used in a fuzzing request."""
# Merge headers first, then top-level parameters.
headers = {} # type: Dict[str, str]
for dictionary in args:
headers.update(dictionary.get('_request_options', {}).get('headers', {}))
output = {} # type: Dict[str, Any]
for dictionary in args:
output.update(dictionary)
if not headers:
return output
if not output['_request_options']:
output['_request_options'] = {}
output['_request_options']['headers'] = headers
return output
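# --- Usage sketch (illustrative only; not part of the original module) ------
# Assuming fuzz-lightyear has already been pointed at a Swagger client through
# its abstraction layer, a single fuzzed call might look like the following;
# 'get_pets' and 'pets' are placeholder operation/tag names, not operations
# defined by this repository.
#
#     request = FuzzingRequest(operation_id='get_pets', tag='pets')
#     response = request.send()   # fuzzes parameters, then issues the call
#     print(str(request))         # prints the equivalent curl command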
| 34.832753 | 112 | 0.589777 |
28ff079b9134f4a464d0ed8209bda1564488e91e | 7,103 | py | Python | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_auto_parallel_two_matmul.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.parallel import _cost_model_context as cost_model_context
from mindspore.parallel._cost_model_context import _set_algo_single_loop, _get_algo_single_loop
from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters
from mindspore.parallel._utils import _reset_op_id as reset_op_id
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y, b):
predict = self.network(x, y, b)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y, b):
return grad_all(self.network)(x, y, b)
# model_parallel test
def test_two_matmul():
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
def construct(self, x, y, b):
out = self.matmul1(x, y)
out = self.matmul2(out, b)
return out
size = 16
context.set_auto_parallel_context(device_num=size, global_rank=0)
cost_model_context.set_cost_model_context(device_memory_capacity=32.0 * 1024.0 * 1024.0 * 1024.0,
costmodel_alpha=1.0,
costmodel_beta=60.0,
costmodel_gamma=0.1,
costmodel_communi_threshold=1024.0,
costmodel_communi_const=2222.0,
costmodel_communi_bias=1111.0)
dev_mem_cap = cost_model_context.get_cost_model_context("device_memory_capacity")
assert dev_mem_cap == 32.0 * 1024.0 * 1024.0 * 1024.0
costmodel_alpha = cost_model_context.get_cost_model_context("costmodel_alpha")
assert costmodel_alpha == 1.0
costmodel_beta = cost_model_context.get_cost_model_context("costmodel_beta")
assert costmodel_beta == 60.0
costmodel_gamma = cost_model_context.get_cost_model_context("costmodel_gamma")
assert costmodel_gamma == 0.1
costmodel_communi_threshold = cost_model_context.get_cost_model_context("costmodel_communi_threshold")
assert costmodel_communi_threshold == 1024.0
costmodel_communi_const = cost_model_context.get_cost_model_context("costmodel_communi_const")
assert costmodel_communi_const == 2222.0
costmodel_communi_bias = cost_model_context.get_cost_model_context("costmodel_communi_bias")
assert costmodel_communi_bias == 1111.0
cost_model_context.reset_cost_model_context()
dev_mem_cap = cost_model_context.get_cost_model_context("device_memory_capacity")
assert dev_mem_cap == 16.0 * 1024.0 * 1024.0 * 1024.0
costmodel_alpha = cost_model_context.get_cost_model_context("costmodel_alpha")
assert costmodel_alpha == 1.0
costmodel_beta = cost_model_context.get_cost_model_context("costmodel_beta")
assert costmodel_beta == 400.0
costmodel_gamma = cost_model_context.get_cost_model_context("costmodel_gamma")
assert costmodel_gamma == 0.001
costmodel_communi_threshold = cost_model_context.get_cost_model_context("costmodel_communi_threshold")
assert costmodel_communi_threshold == 2048.0
costmodel_communi_const = cost_model_context.get_cost_model_context("costmodel_communi_const")
assert costmodel_communi_const == 3072.0
costmodel_communi_bias = cost_model_context.get_cost_model_context("costmodel_communi_bias")
assert costmodel_communi_bias == 1024.0
set_algo_parameters(tensor_slice_align_enable=False, tensor_slice_align_size=32,
fully_use_devices=False, elementwise_op_strategy_follow=False,
enable_algo_approxi=True, algo_approxi_epsilon=0.001)
para_slice_align_enable = get_algo_parameters("tensor_slice_align_enable")
assert not para_slice_align_enable
para_slice_align_size = get_algo_parameters("tensor_slice_align_size")
assert para_slice_align_size == 32
fully_use_devices = get_algo_parameters("fully_use_devices")
assert not fully_use_devices
elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow")
assert not elementwise_op_strategy_follow
enable_approxi = get_algo_parameters("enable_algo_approxi")
assert enable_approxi
algo_epsilon = get_algo_parameters("algo_approxi_epsilon")
assert algo_epsilon == 0.001
    expected_single_loop = True
    single_loop = _get_algo_single_loop()
    assert expected_single_loop == single_loop
    expected_single_loop = False
    _set_algo_single_loop(expected_single_loop)
    single_loop = _get_algo_single_loop()
    assert expected_single_loop == single_loop
reset_algo_parameters()
para_slice_align_enable = get_algo_parameters("tensor_slice_align_enable")
assert not para_slice_align_enable
para_slice_align_size = get_algo_parameters("tensor_slice_align_size")
assert para_slice_align_size == 16
fully_use_devices = get_algo_parameters("fully_use_devices")
assert fully_use_devices
elementwise_op_strategy_follow = get_algo_parameters("elementwise_op_strategy_follow")
assert not elementwise_op_strategy_follow
enable_approxi = get_algo_parameters("enable_algo_approxi")
assert not enable_approxi
algo_epsilon = get_algo_parameters("algo_approxi_epsilon")
assert algo_epsilon == 0.1
x = Tensor(np.ones([128, 32]), dtype=ms.float32)
y = Tensor(np.ones([32, 64]), dtype=ms.float32)
b = Tensor(np.ones([64, 64]), dtype=ms.float32)
net = NetWithLoss(Net())
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
reset_op_id()
net.set_train()
_executor.compile(net, x, y, b, phase='train')
strategies = _executor._get_shard_strategy(net)
for (k, v) in strategies.items():
if re.search('MatMul-op', k) is not None:
assert v == [[16, 1], [1, 1]]
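# Hedged note: like the rest of this unit-test directory, this file is written as
# a pytest-style test and could presumably be run on its own with something like
#     pytest tests/ut/python/parallel/test_auto_parallel_two_matmul.py
# (the exact invocation depends on the MindSpore test environment).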
| 43.845679 | 106 | 0.731803 |
78ada0b4ec4bf9ec0cebfdb00b20ce5aeaa465ce | 21,713 | py | Python | autoscale/server_control/aws.py | usaar33/ec2_autoscale | e1646869057c1d01d031b2a19bb62db5c31817aa | [
"MIT"
] | null | null | null | autoscale/server_control/aws.py | usaar33/ec2_autoscale | e1646869057c1d01d031b2a19bb62db5c31817aa | [
"MIT"
] | null | null | null | autoscale/server_control/aws.py | usaar33/ec2_autoscale | e1646869057c1d01d031b2a19bb62db5c31817aa | [
"MIT"
] | null | null | null | """
The aws module is intended to provide an interface to aws
It tightly interfaces with boto. Indeed, many functions require a boto connection object parameter
While it exposes boto objects (espcially instances) to callees, it provides the following ease of use ability:
* Unpacking reservations into reservations
* Functionality to wait until servers are booted
* Retrying aws commands on errors
* Defining aws parameters
* Other aws utility functions
"""
import datetime
import operator
import random
import socket
import time
import boto
import logging
logger = logging.getLogger('aws')
meta_url = 'http://instance-data/latest/meta-data/'
meta_data = ['ami-id', 'hostname', 'instance-id', 'instance-type', 'kernel-id',
'local-hostname', 'local-ipv4', 'public-hostname', 'public-ipv4']
# TODO(DEVELOPER): THIS DATA IS REALLY OUTDATED!!
# AWS API doesn't provide an easy way to access on-demand instance costs
# AWS definitions
REGION_US_EAST_1 = 'us-east-1'
REGION_US_WEST_1 = 'us-west-1'
REGION_US_WEST_2 = 'us-west-2'
REGION_AP_NORTHEAST_1 = 'ap-northeast-1'
REGION_AP_SOUTHEAST_1 = 'ap-southeast-1'
REGION_EU_WEST_1 = 'eu-west-1'
# information incomplete for regions other than us_east_1 and instances we don't use
od_instance_costs = {REGION_US_EAST_1: {'m1.small' : 0.06,
'm1.medium' : 0.12,
'm1.large' : 0.24,
'm1.xlarge' : 0.48,
't1.micro' : 0.02,
'm2.xlarge' : 0.41,
'm2.2xlarge' : .820,
'm2.4xlarge' : 1.640,
'c1.medium' : 0.145,
'c1.xlarge' : 0.58,
'cc1.4xlarge' : 1.3,
'cc2.8xlarge' : 2.4,
'cg1.4xlarge' : 2.1,
'hi1.4xlarge' : 3.1,
'cr1.8xlarge' : 3.5
},
REGION_US_WEST_1: {
},
}
#Definition of instance boot AMIs we have on EC2
AMIs = {REGION_US_EAST_1: {'karmic32': 'ami-bb709dd2',
'karmic64': 'ami-55739e3c',
'lucid32': 'ami-4fd00726',
'lucid64': 'ami-35de095c',
'oneiric32' : 'ami-d1a671b8',
'oneiric64' : 'ami-4fa37426',
'precise64' : 'ami-cf5e2ba6',
'raring64' : 'ami-9597e1fc',
'setup_server': 'ami-2eff6047', # precise with aufs and wsshd, created 03/08/13
},
REGION_US_WEST_1: {
},
}
#Definition of EBS boot AMIs we have on EC2
AMIs_ebs = {REGION_US_EAST_1: {
'karmic32': 'ami-6743ae0e',
'karmic64': 'ami-7d43ae14',
'lucid32': 'ami-71dc0b18',
'lucid64': 'ami-55dc0b3c',
'oneiric32' : 'ami-6ba27502',
'oneiric64' : 'ami-6fa27506',
'precise64' : 'ami-e7582d8e',
'raring64' : 'ami-e995e380',
},
REGION_US_WEST_1: {
},
}
#Definition of HVM AMIs we have on EC2
# (All of these are also EBS-boot)
AMIs_hvm = {REGION_US_EAST_1: {
'natty64': 'ami-f1589598',
'oneiric64' : 'ami-beba68d7',
'precise64' : 'ami-f9582d90',
'raring64' : 'ami-eb95e382',
},
REGION_US_WEST_1: {
},
}
def get_ami(ami, zone, ebs_boot=False, instance_type = None):
"""Get AMI from our AMI list if it exists
Else return None"""
if not zone:
zone = REGION_US_EAST_1 + 'a'
if instance_type == 't1.micro':
        # t1.micro lacks instance storage
ebs_boot = True
    region = zone[:-1] # dropping the trailing availability-zone letter gives the region
if instance_type in ['cc1.4xlarge', 'cc2.8xlarge', 'cg1.4xlarge']:
# Cluster compute instances use ebs backed hvm
dct = AMIs_hvm
elif ebs_boot:
dct = AMIs_ebs
else:
dct = AMIs
return dct[region].get(ami,None)
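# Example (hedged sketch; return values taken from the AMI tables above):
#     get_ami('precise64', 'us-east-1a')                 # -> 'ami-cf5e2ba6' (instance-store boot)
#     get_ami('precise64', 'us-east-1a', ebs_boot=True)  # -> 'ami-e7582d8e' (EBS boot)
#     get_ami('not-a-known-key', 'us-east-1a')           # -> None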
imaged_amis = []
def gen_custom_image_table():
global imaged_amis
ami_dcts = [AMIs, AMIs_ebs, AMIs_hvm]
region_defs = [REGION_US_EAST_1]
# DEVELOPER: PUT YOUR CUSTOM AMI KEYS BELOW (e.g. ami-..)
ami_keys = ['']
for ami_dct in ami_dcts:
for region_def in region_defs:
region_dct = ami_dct.get(region_def)
if not region_dct:
continue
for ami_key in ami_keys:
ami = region_dct.get(ami_key)
if ami:
imaged_amis.append(ami)
gen_custom_image_table() #needed for below function
def is_custom_image(ami_id):
"""Determinte if an ami-id, e.g. ami-63be790a is imaged.
"""
global imaged_amis
return ami_id in imaged_amis
def retry_n_times(func, n, caller, *args, **kwargs):
"""Run function func(*args, **kawargs) n times until no EC2ResponseError or n is reached
caller is a string specifying who called this (for logging)"""
i= -1
while True:
try:
return func(*args,**kwargs)
        except boto.exception.EC2ResponseError, e: # aws hiccups sometimes
n-=1
i+=1
logger.error('%s: EC2ResponseError: %s', caller, e)
if n <= 0:
raise
else:
                time.sleep(min(10,0.2 + (1<<i) * 0.5 * random.random())) # exponential backoff
continue
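# Example (hedged): retry a flaky describe call up to 3 times, assuming `conn`
# is an existing boto EC2Connection:
#     reservations = retry_n_times(conn.get_all_instances, 3, 'my_caller')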
"""
Generic server spawning with boto
"""
def get_instances(connection, instance_ids = None, filters = None):
"""Get instances by instance_ids
A dictionary of filters can be provided as well
See http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/
"""
#reservations = connection.get_all_instances(instance_ids = instance_ids)
reservations = retry_n_times(connection.get_all_instances, 3, 'get_instances',
instance_ids = instance_ids, filters = filters)
return extract_instances(reservations)
def extract_instances(reservations):
"""Extract instances from a list of reservations"""
instances = []
for reservation in reservations:
try:
groups = [group.groupName for group in reservation.groups]
except AttributeError: #boto version < 2.0rc1
try:
groups = [group.name for group in reservation.groups]
except AttributeError:
groups = [group.id for group in reservation.groups]
for instance in reservation.instances:
instance.groups = groups
instances.append(instance)
return instances
"""Below need a boto EC2Connection object, connection, to work"""
def run_instances(connection, ami, ebs_boot = False, num=1, min_count=None, groups=['default'],
key_name='team', zone='us-east-1a', type='m1.small'):
"""
Returns reservation
reservation.instances accesses the actual instances
"""
my_ami = get_ami(ami, zone, ebs_boot, type)
if not my_ami:
my_ami = ami
if min_count == None:
min_count = num
reservation = connection.run_instances(image_id=my_ami, security_groups=groups, max_count=num,
min_count=min_count, instance_type=type, placement=zone,
key_name=key_name)
return reservation
rin = run_instances
def get_spot_requests(connection, request_ids = None, filters= None):
"""Get spot requests by request_ids
A dictionary of filters can be provided as well
http://docs.amazonwebservices.com/AWSEC2/latest/APIReference/index.html?ApiReference-query-DescribeSpotInstanceRequests.html
"""
#reservations = connection.get_all_instances(instance_ids = instance_ids)
reservations = retry_n_times(connection.get_all_spot_instance_requests, 3, 'get_spot_requests',
request_ids = request_ids, filters = filters)
return reservations
def get_spot_price_history(connection,start_time=None, end_time=None,
instance_type=None, product_description=None,
availability_zone=None):
"""Get spot price history.
start_time and end_time should be datetime objects in UTC or None
See boto's get_spot_price_history
    Timestamps on returned records are decoded into datetime.datetime objects
"""
"""
internally has a loop to handle boto returning 1,000 results max (no idea why)
"""
start_times = {} # dictionary maps instance_type + az to times
result_set = []
extra_loops = 0
while True:
start_time_str = start_time.isoformat() if start_time else None
end_time_str = end_time.isoformat() if end_time else None
price_hist = retry_n_times(connection.get_spot_price_history, 3, 'get_spot_price_history',
start_time=start_time_str, end_time=end_time_str,
instance_type=instance_type, product_description=product_description,
availability_zone=availability_zone)
for ph in price_hist:
ph.timestamp = ts = datetime.datetime.strptime(ph.timestamp, '%Y-%m-%dT%H:%M:%S.000Z')
key = '^'.join([ph.instance_type,ph.availability_zone])
if key not in start_times or ts < start_times[key]:
start_times[key] = ts
price_hist.sort(key=lambda ph: ph.timestamp )
result_set = price_hist + result_set
if not price_hist:
#print 'epricehist term %s records' % len(start_times)
break
if not start_time and not end_time: # just show 1000..
break
if end_time and price_hist[0].timestamp >= end_time: # can't go earlier!
break
if start_time: # verify that all az have been found
if price_hist[0].timestamp <= start_time: # at least one instance time has been resolved
extra_loops += 1
#print 'extra loop %s' % extra_loops
if extra_loops > 20:
# sanity check - don't go too far back
break
for record_start_time in start_times.values():
if record_start_time > start_time: # fail case
break
else: # all resolved successfully
#print 'rc term %s records' % len(start_times)
break
end_time = price_hist[0].timestamp
return result_set
def request_spot_instances(connection, max_price, ami, ebs_boot = False, num=1,
groups=['default'], key_name='team', zone='us-east-1a', type='m1.small'):
"""
Returns List of Spot requests
Price is a string, e.g. '0.08' for $0.08
"""
my_ami = get_ami(ami, zone, ebs_boot, type)
if not my_ami:
my_ami = ami
spot_requests = connection.request_spot_instances(image_id = my_ami, price=max_price,
security_groups = groups, count=num, instance_type=type,
placement = zone,key_name=key_name)
return spot_requests
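# Example (hedged): request two m1.small spot instances with a $0.05/hr ceiling
# and later cancel them, assuming `conn` is a boto EC2Connection:
#     reqs = request_spot_instances(conn, '0.05', 'lucid64', num=2, type='m1.small')
#     cancel_spot_requests(conn, [r.id for r in reqs])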
def cancel_spot_requests(connection, spot_request_ids):
"""Terminate spot requests"""
if not spot_request_ids:
return
if not hasattr(spot_request_ids, '__iter__'):
spot_request_ids = [spot_request_ids]
return retry_n_times(connection.cancel_spot_instance_requests, 3, 'cancel_spot_requests',
request_ids = spot_request_ids)
def get_reserved_instances(connection, filters = None):
"""Get reserved instances"""
res_instances = retry_n_times(connection.get_all_reserved_instances, 3, 'get_reserved_instances', filters = filters)
return res_instances
def terminate_instances(connection, instance_ids):
if not hasattr(instance_ids, '__iter__'):
instance_ids = [instance_ids]
return retry_n_times(connection.terminate_instances, 3, 'terminate_instances',
instance_ids = instance_ids)
def set_instance_or_req_tags(connection, ids, tag_dict):
"""Set the tag of an instance(s) or spot request(s) with given ids
tag_dict - maps tags to values
"""
if not hasattr(ids, '__iter__'):
ids = [ids]
return retry_n_times(connection.create_tags, 9, 'set_instance_or_req_tags',
resource_ids = ids, tags = tag_dict)
tin = terminate_instances
describe_instances = get_instances
din = describe_instances
"""
Generic ebs volume management with boto
"""
def wait_for_status(ec2obj, status, num_polls=10, sleep_time=0.5,
do_raise=True):
"""Waits until ec2obj.status (or ec2obj.state) becomes *status*. Expects a
boto ec2 object with a status (or state) attribute and an update() method.
"""
field = None
if hasattr(ec2obj, 'status'):
field = 'status'
elif hasattr(ec2obj, 'state'):
field = 'state'
else:
raise Exception('ec2obj has no status or state attribute')
get_status = operator.attrgetter(field)
tries = 0
while True:
if get_status(ec2obj) == status or tries > num_polls:
break
time.sleep(sleep_time)
ec2obj.update()
tries += 1
if do_raise and get_status(ec2obj) != status:
raise Exception('ec2obj status %s != %s' % (get_status(ec2obj), status))
def get_volumes(connection, volume_ids=None, filters=None):
"""Get all volumes satisfying criteria.
* connection: An ec2 connection instance.
* volume_ids: IDs of volumes to retrieve.
* filters: Additional filtering criteria
Returns a list of volume objects.
"""
volumes = retry_n_times(connection.get_all_volumes, 3, 'get_volumes',
volume_ids=volume_ids, filters=filters)
return volumes
def get_volume(connection, volume_id):
"""Returns an ebs volume with *volume_id*."""
res_set = get_volumes(connection, [volume_id])
if not res_set or len(res_set) != 1:
raise Exception('unexpected result from get_volumes')
volume = res_set.pop()
return volume
def create_volume(connection, size, zone, snapshot=None, block=False,
num_polls=120, sleep_time=1, do_raise=True):
"""Creates an ebs volume.
* connection: An ec2 connection instance.
* size: Size of volume to create in GiB.
* zone: Availability zone in which the volume should be created.
* snapshot: Optional snapshot (or id) from which to create the volume.
    * block: If True, waits until the volume has been created successfully.
* num_polls: Max number of polls to perform while blocking.
* sleep_time: Seconds to wait between polls while blocking.
* do_raise: Raises exception if creation is not successful after block.
Returns the volume object that was created.
"""
volume = connection.create_volume(size, zone, snapshot)
if block:
wait_for_status(volume, 'available', num_polls, sleep_time, do_raise)
return volume
def delete_volume(connection, volume_id=None):
"""Deletes an ebs volume.
* connection: An ec2 connection instance.
* volume_id: ID of volume to delete.
Returns True if deletion is successful.
"""
return connection.delete_volume(volume_id)
def attach_volume(connection, volume_id, instance_id, device, block=False,
num_polls=60, sleep_time=0.5, do_raise=True):
"""Attaches an ebs volume to an ec2 instance.
* connection: An ec2 connection instance.
* volume_id: ID of volume to attach.
* instance_id: ID of instance where volume will be attached.
* device: Device file where volume will be accessible.
* block: If True, waits until the volume has been attached successfully.
* num_polls: Max number of polls to perform while blocking.
* sleep_time: Seconds to wait between polls while blocking.
* do_raise: Raises exception if attachment is not successful after block.
Returns True if successful.
"""
result = connection.attach_volume(volume_id, instance_id, device)
if result and block:
volume = get_volume(connection, volume_id)
wait_for_status(volume, 'in-use', num_polls, sleep_time, do_raise)
return result
def detach_volume(connection, volume_id, instance_id, device, force=False,
block=False, num_polls=120, sleep_time=0.5, do_raise=True):
"""Detaches an ebs volume from an instance.
* connection: An ec2 connection instance.
* volume_id: ID of volume to detach.
* instance_id: ID of instance from which volume will be detached.
    * device: Device file where volume is accessible.
    * force: If True, forces detachment even if the previous attempt did not complete cleanly.
    * block: If True, waits until the volume has been detached successfully.
* num_polls: Max number of polls to perform while blocking.
* sleep_time: Seconds to wait between polls while blocking.
* do_raise: Raises exception if detachment is not successful after block.
Returns True if successful.
"""
result = connection.detach_volume(volume_id, instance_id, device, force)
if result and block:
volume = get_volume(connection, volume_id)
wait_for_status(volume, 'available', num_polls, sleep_time, do_raise)
return result
def get_volume_tags(connection, volume_id):
"""Returns the tags of an ebs volume."""
volume = get_volume(connection, volume_id)
return volume.tags
def add_volume_tag(connection, volume_id, key, value=''):
"""Adds key/value as a tag to an ebs volume."""
volume = get_volume(connection, volume_id)
return volume.add_tag(key, value)
def remove_volume_tag(connection, volume_id, key, value=None):
"""Removes a tag from an ebs volume."""
volume = get_volume(connection, volume_id)
return volume.remove_tag(key, value)
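# Usage sketch: walks a volume through a typical lifecycle (create, tag,
# attach, detach, delete) using the helpers above. *connection* is assumed to
# be a boto2 EC2Connection; the zone, size and device path are placeholders.
def _example_volume_lifecycle(connection, instance_id, size_gib=10,
                              zone='us-east-1a', device='/dev/sdf'):
    volume = create_volume(connection, size_gib, zone, block=True)
    add_volume_tag(connection, volume.id, 'purpose', 'scratch')
    attach_volume(connection, volume.id, instance_id, device, block=True)
    # ... the instance can now format and mount the device ...
    detach_volume(connection, volume.id, instance_id, device, block=True)
    delete_volume(connection, volume.id)
    return volume.id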
"""
Generic snapshot management with boto
"""
def get_snapshots(connection, snapshot_ids=None, filters=None):
"""Get all snapshots satisfying criteria.
* connection: An ec2 connection instance.
* snapshot_ids: IDs of snapshots to retrieve.
* filters: Additional filtering criteria
Returns a list of snapshot objects.
"""
snapshots = retry_n_times(connection.get_all_snapshots, 3, 'get_snapshots',
snapshot_ids=snapshot_ids, filters=filters)
return snapshots
def get_snapshot(connection, snapshot_id):
"""Returns a snapshot with *snapshot_id*."""
res_set = get_snapshots(connection, [snapshot_id])
if not res_set or len(res_set) != 1:
raise Exception('unexpected result from get_snapshots')
snapshot = res_set.pop()
return snapshot
def create_snapshot(connection, volume_id, description=None, block=False,
num_polls=720, sleep_time=5, do_raise=True):
"""Creates a snapshot.
* connection: An ec2 connection instance.
* volume_id: ID of the ebs volume which should be snapshotted.
* description: Optional description for the snapshot.
* block: If True, waits until the snapshot creation has finished.
* num_polls: Max number of polls to perform while blocking.
* sleep_time: Seconds to wait between polls while blocking.
* do_raise: Raises exception if creation is not successful after block.
Returns the snapshot object that was created.
"""
snapshot = connection.create_snapshot(volume_id, description)
if block:
wait_for_status(snapshot, 'completed', num_polls, sleep_time, do_raise)
return snapshot
def delete_snapshot(connection, snapshot_id=None):
"""Deletes a snapshot.
* connection: An ec2 connection instance.
    * snapshot_id: ID of snapshot to delete.
Returns True if deletion is successful.
"""
return connection.delete_snapshot(snapshot_id)
def get_snapshot_tags(connection, snapshot_id):
"""Returns the tags of a snapshot."""
snapshot = get_snapshot(connection, snapshot_id)
return snapshot.tags
def add_snapshot_tag(connection, snapshot_id, key, value=''):
"""Adds key/value as a tag to a snapshot."""
snapshot = get_snapshot(connection, snapshot_id)
return snapshot.add_tag(key, value)
def remove_snapshot_tag(connection, snapshot_id, key, value=None):
"""Removes a tag from a snapshot."""
snapshot = get_snapshot(connection, snapshot_id)
return snapshot.remove_tag(key, value)
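# Usage sketch: snapshots a volume, blocks until completion and tags the
# result with a date stamp. *connection* is assumed to be a boto2
# EC2Connection; the description and tag key are placeholders.
def _example_snapshot_volume(connection, volume_id, description='nightly backup'):
    snapshot = create_snapshot(connection, volume_id, description, block=True)
    add_snapshot_tag(connection, snapshot.id, 'created', time.strftime('%Y-%m-%d'))
    return snapshot.id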
"""
Useful utilities
"""
def is_ec2_instance(hostname=None):
"""Checks if *hostname* refers to an ec2 instance. If hostname is not
given, assumes the check is for the local machine.
"""
if hostname is None:
hostname = socket.getfqdn()
domain = (hostname.split('.'))[-1]
return domain == 'internal'
| 38.982047 | 141 | 0.613688 |
285db251c64854255249a7080b75a6d8d86f80a4 | 16,077 | py | Python | jsonargparse/util.py | shenmishajing/jsonargparse | f08329c49b267b9c21a845d533356ec8178c84ec | [
"MIT"
] | null | null | null | jsonargparse/util.py | shenmishajing/jsonargparse | f08329c49b267b9c21a845d533356ec8178c84ec | [
"MIT"
] | null | null | null | jsonargparse/util.py | shenmishajing/jsonargparse | f08329c49b267b9c21a845d533356ec8178c84ec | [
"MIT"
] | null | null | null | """Collection of general functions and classes."""
import inspect
import logging
import os
import re
import stat
import sys
import warnings
from collections import defaultdict
from contextlib import contextmanager, redirect_stderr
from contextvars import ContextVar
from typing import Any, Optional, Tuple, Union
from .loaders_dumpers import load_value
from .optionals import (
url_support,
import_requests,
import_url_validator,
fsspec_support,
import_fsspec,
get_config_read_mode,
)
from .type_checking import ArgumentParser
__all__ = [
'ParserError',
'null_logger',
'usage_and_exit_error_handler',
'Path',
'LoggerProperty',
]
null_logger = logging.getLogger('jsonargparse_null_logger')
null_logger.addHandler(logging.NullHandler())
null_logger.parent = None
NoneType = type(None)
class ParserError(Exception):
"""Error raised when parsing a value fails."""
pass
class JsonargparseWarning(UserWarning):
pass
def warning(message, category=JsonargparseWarning, stacklevel=1):
warnings.warn(
re.sub('\n\n+', '\n\n', re.sub('\n +', '\n ', message)),
category=category,
stacklevel=stacklevel+1,
)
def identity(value):
return value
def _parse_value_or_config(value: Any, enable_path: bool = True) -> Tuple[Any, Optional['Path']]:
"""Parses yaml/json config in a string or a path"""
cfg_path = None
if isinstance(value, str) and value.strip() != '':
parsed_val = load_value(value)
if not isinstance(parsed_val, str):
value = parsed_val
if enable_path and isinstance(value, str):
try:
cfg_path = Path(value, mode=get_config_read_mode())
except TypeError:
pass
else:
value = load_value(cfg_path.get_content())
if isinstance(value, dict) and cfg_path is not None:
value['__path__'] = cfg_path
return value, cfg_path
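# Usage sketch: a YAML/JSON literal passed as a string is parsed in place,
# while enable_path=False keeps the helper from treating it as a file path;
# the literal below is an arbitrary example value.
def _example_parse_value():
    parsed, cfg_path = _parse_value_or_config('{"batch_size": 8}', enable_path=False)
    assert parsed == {'batch_size': 8} and cfg_path is None
    return parsed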
def usage_and_exit_error_handler(parser: 'ArgumentParser', message: str) -> None:
"""Error handler that prints the usage and exits with error code 2 (same behavior as argparse).
Args:
parser: The parser object.
message: The message describing the error being handled.
"""
parser.print_usage(sys.stderr)
args = {'prog': parser.prog, 'message': message}
sys.stderr.write('%(prog)s: error: %(message)s\n' % args)
parser.exit(2)
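# Usage sketch: the handler is meant to be given to a parser so that parsing
# errors print the usage and exit with code 2. That the parser exposes an
# ``error_handler`` attribute is an assumption made only for this sketch.
def _example_attach_error_handler(parser: 'ArgumentParser') -> 'ArgumentParser':
    parser.error_handler = usage_and_exit_error_handler
    return parser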
def _issubclass(cls, class_or_tuple):
"""Extension of issubclass that supports non-class argument."""
return inspect.isclass(cls) and issubclass(cls, class_or_tuple)
def import_object(name: str):
"""Returns an object in a module given its dot import path."""
if not isinstance(name, str) or '.' not in name:
raise ValueError(f'Expected a dot import path string: {name}')
if not all(x.isidentifier() for x in name.split('.')):
raise ValueError(f'Unexpected import path format: {name}')
name_module, name_object = name.rsplit('.', 1)
try:
parent = __import__(name_module, fromlist=[name_object])
except ModuleNotFoundError as ex:
if '.' not in name_module:
raise ex
name_module, name_object1 = name_module.rsplit('.', 1)
parent = getattr(__import__(name_module, fromlist=[name_object1]), name_object1)
return getattr(parent, name_object)
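# Usage sketch: resolves a dotted import path to the object it names; the
# standard-library target below is chosen purely as an example.
def _example_import_object():
    join = import_object('os.path.join')
    assert join is os.path.join
    return join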
lenient_check: ContextVar = ContextVar('lenient_check', default=False)
@contextmanager
def _lenient_check_context(caller=None):
t = lenient_check.set(False if caller == 'argcomplete' else True)
try:
yield
finally:
lenient_check.reset(t)
@contextmanager
def _suppress_stderr():
"""A context manager that redirects stderr to devnull."""
with open(os.devnull, 'w') as fnull:
with redirect_stderr(fnull):
yield None
@contextmanager
def change_to_path_dir(path: Optional['Path']):
"""A context manager for running code in the directory of a path."""
chdir = path is not None and not (path.is_url or path.is_fsspec)
if chdir:
cwd = os.getcwd()
os.chdir(os.path.abspath(os.path.dirname(str(path))))
try:
yield None
finally:
if chdir:
os.chdir(cwd)
def indent_text(text: str) -> str:
return text.replace('\n', '\n ')
def known_to_fsspec(path: str) -> bool:
import_fsspec('known_to_fsspec')
from fsspec.registry import known_implementations
for protocol in known_implementations.keys():
if path.startswith(protocol+'://') or path.startswith(protocol+'::'):
return True
return False
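# Usage sketch: the check is purely textual, reporting whether the path looks
# like a known fsspec protocol; it assumes fsspec is installed and that 's3'
# is among its registered implementations.
def _example_is_fsspec_path():
    return known_to_fsspec('s3://bucket/config.yaml')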
class DirectedGraph:
def __init__(self):
self.nodes = []
self.edges_dict = defaultdict(list)
def add_edge(self, source, target):
for node in [source, target]:
if node not in self.nodes:
self.nodes.append(node)
self.edges_dict[self.nodes.index(source)].append(self.nodes.index(target))
def get_topological_order(self):
exploring = [False]*len(self.nodes)
visited = [False]*len(self.nodes)
order = []
for source in range(len(self.nodes)):
if not visited[source]:
self.topological_sort(source, exploring, visited, order)
return [self.nodes[n] for n in order]
def topological_sort(self, source, exploring, visited, order):
exploring[source] = True
for target in self.edges_dict[source]:
if exploring[target]:
                raise ValueError(f'Graph has cycles, found while checking {self.nodes[source]} --> {self.nodes[target]}')
elif not visited[target]:
self.topological_sort(target, exploring, visited, order)
visited[source] = True
exploring[source] = False
order.insert(0, source)
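# Usage sketch: builds a tiny dependency graph and asks for a topological
# order; the node names are arbitrary strings used only for illustration.
def _example_topological_order():
    graph = DirectedGraph()
    graph.add_edge('parser', 'actions')
    graph.add_edge('actions', 'types')
    return graph.get_topological_order()  # ['parser', 'actions', 'types']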
class Path:
"""Stores a (possibly relative) path and the corresponding absolute path.
When a Path instance is created it is checked that: the path exists, whether
it is a file or directory and whether has the required access permissions
(f=file, d=directory, r=readable, w=writeable, x=executable, c=creatable,
u=url, s=fsspec or in uppercase meaning not, i.e., F=not-file,
D=not-directory, R=not-readable, W=not-writeable and X=not-executable). The
absolute path can be obtained without having to remember the working
directory from when the object was created.
"""
def __init__(
self,
path: Union[str, 'Path'],
mode: str = 'fr',
cwd: Optional[str] = None,
skip_check: bool = False,
):
"""Initializer for Path instance.
Args:
path: The path to check and store.
            mode: The required type and access permissions among [fdrwxcusFDRWX].
cwd: Working directory for relative paths. If None, then os.getcwd() is used.
skip_check: Whether to skip path checks.
Raises:
ValueError: If the provided mode is invalid.
TypeError: If the path does not exist or does not agree with the mode.
"""
self._check_mode(mode)
if cwd is None:
cwd = os.getcwd()
is_url = False
is_fsspec = False
if isinstance(path, Path):
is_url = path.is_url
is_fsspec = path.is_fsspec
cwd = path.cwd # type: ignore
abs_path = path.abs_path # type: ignore
path = path.rel_path # type: ignore
elif isinstance(path, str):
abs_path = os.path.expanduser(path)
if re.match('^file:///?', abs_path):
abs_path = re.sub('^file:///?', '/', abs_path)
if 'u' in mode and url_support and import_url_validator('Path')(abs_path):
is_url = True
elif 's' in mode and fsspec_support and known_to_fsspec(abs_path):
is_fsspec = True
elif 'f' in mode or 'd' in mode:
abs_path = abs_path if os.path.isabs(abs_path) else os.path.join(cwd, abs_path)
else:
raise TypeError('Expected path to be a string or a Path object.')
if not skip_check and is_url:
if 'r' in mode:
requests = import_requests('Path with URL support')
try:
requests.head(abs_path).raise_for_status()
except requests.HTTPError as ex:
raise TypeError(f'{abs_path} HEAD not accessible :: {ex}') from ex
elif not skip_check and is_fsspec:
fsspec_mode = ''.join(c for c in mode if c in {'r','w'})
if fsspec_mode:
fsspec = import_fsspec('Path')
try:
handle = fsspec.open(abs_path, fsspec_mode)
handle.open()
handle.close()
except FileNotFoundError:
raise TypeError('Path does not exist: '+abs_path)
except PermissionError:
raise TypeError('Path exists but no permission to access: '+abs_path)
elif not skip_check:
ptype = 'Directory' if 'd' in mode else 'File'
if 'c' in mode:
pdir = os.path.realpath(os.path.join(abs_path, '..'))
if not os.path.isdir(pdir):
raise TypeError(ptype+' is not creatable since parent directory does not exist: '+abs_path)
if not os.access(pdir, os.W_OK):
raise TypeError(ptype+' is not creatable since parent directory not writeable: '+abs_path)
if 'd' in mode and os.access(abs_path, os.F_OK) and not os.path.isdir(abs_path):
raise TypeError(ptype+' is not creatable since path already exists: '+abs_path)
if 'f' in mode and os.access(abs_path, os.F_OK) and not os.path.isfile(abs_path):
raise TypeError(ptype+' is not creatable since path already exists: '+abs_path)
else:
if not os.access(abs_path, os.F_OK):
raise TypeError(ptype+' does not exist: '+abs_path)
if 'd' in mode and not os.path.isdir(abs_path):
raise TypeError('Path is not a directory: '+abs_path)
if 'f' in mode and not (os.path.isfile(abs_path) or stat.S_ISFIFO(os.stat(abs_path).st_mode)):
raise TypeError('Path is not a file: '+abs_path)
if 'r' in mode and not os.access(abs_path, os.R_OK):
raise TypeError(ptype+' is not readable: '+abs_path)
if 'w' in mode and not os.access(abs_path, os.W_OK):
raise TypeError(ptype+' is not writeable: '+abs_path)
if 'x' in mode and not os.access(abs_path, os.X_OK):
raise TypeError(ptype+' is not executable: '+abs_path)
if 'D' in mode and os.path.isdir(abs_path):
raise TypeError('Path is a directory: '+abs_path)
if 'F' in mode and (os.path.isfile(abs_path) or stat.S_ISFIFO(os.stat(abs_path).st_mode)):
raise TypeError('Path is a file: '+abs_path)
if 'R' in mode and os.access(abs_path, os.R_OK):
raise TypeError(ptype+' is readable: '+abs_path)
if 'W' in mode and os.access(abs_path, os.W_OK):
raise TypeError(ptype+' is writeable: '+abs_path)
if 'X' in mode and os.access(abs_path, os.X_OK):
raise TypeError(ptype+' is executable: '+abs_path)
self.rel_path = path
self.abs_path = abs_path
self.cwd = cwd
self.mode = mode
self.is_url: bool = is_url
self.is_fsspec: bool = is_fsspec
self.skip_check = skip_check
def __str__(self):
return self.rel_path
def __repr__(self):
cwd = '' if self.rel_path == self.abs_path else ', cwd='+self.cwd
name = 'Path_'+self.mode
if self.skip_check:
name += '_skip_check'
return name+'('+self.rel_path+cwd+')'
def __call__(self, absolute:bool=True) -> str:
"""Returns the path as a string.
Args:
absolute: If false returns the original path given, otherwise the corresponding absolute path.
"""
return self.abs_path if absolute else self.rel_path
def get_content(self, mode:str='r') -> str:
"""Returns the contents of the file or the response of a GET request to the URL."""
if self.is_url:
requests = import_requests('Path with URL support')
response = requests.get(self.abs_path)
response.raise_for_status()
return response.text
elif self.is_fsspec:
fsspec = import_fsspec('Path')
with fsspec.open(self.abs_path, mode) as handle:
with handle as input_file:
return input_file.read()
else:
with open(self.abs_path, mode) as input_file:
return input_file.read()
@staticmethod
def _check_mode(mode:str):
if not isinstance(mode, str):
raise ValueError('Expected mode to be a string.')
if len(set(mode)-set('fdrwxcusFDRWX')) > 0:
raise ValueError('Expected mode to only include [fdrwxcusFDRWX] flags.')
if 'f' in mode and 'd' in mode:
raise ValueError('Both modes "f" and "d" not possible.')
if 'u' in mode and 'd' in mode:
raise ValueError('Both modes "d" and "u" not possible.')
if 's' in mode and 'd' in mode:
raise ValueError('Both modes "d" and "s" not possible.')
class LoggerProperty:
"""Class designed to be inherited by other classes to add a logger property."""
def __init__(self):
"""Initializer for LoggerProperty class."""
if not hasattr(self, '_logger'):
self.logger = None
@property
def logger(self):
"""The logger property for the class.
:getter: Returns the current logger.
:setter: Sets the given logging.Logger as logger or sets the default logger
if given True/str(logger name)/dict(name, level), or disables logging
if given False/None.
Raises:
ValueError: If an invalid logger value is given.
"""
return self._logger
@logger.setter
def logger(self, logger):
if logger is None or (isinstance(logger, bool) and not logger):
self._logger = null_logger
elif isinstance(logger, (bool, str, dict)) and logger:
levels = {'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'}
level = logging.WARNING
if isinstance(logger, dict) and 'level' in logger:
if logger['level'] not in levels:
raise ValueError(f'Logger level must be one of {levels}.')
level = getattr(logging, logger['level'])
if isinstance(logger, bool) or (isinstance(logger, dict) and 'name' not in logger):
try:
import reconplogger
logger = reconplogger.logger_setup(level=level)
except (ImportError, ValueError):
pass
if not isinstance(logger, logging.Logger):
name = type(self).__name__
if isinstance(logger, str):
name = logger
elif isinstance(logger, dict) and 'name' in logger:
name = logger['name']
logger = logging.getLogger(name)
if len(logger.handlers) == 0:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
self._logger = logger
elif not isinstance(logger, logging.Logger):
raise ValueError('Expected logger to be an instance of logging.Logger or bool or str or dict or None.')
else:
self._logger = logger
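# Usage sketch: a minimal class that inherits the logger property; passing a
# dict selects the logger name and level, while None or False silences logging
# through null_logger. The class and logger names are placeholders, e.g.
# _ExampleComponent(logger={'name': 'example', 'level': 'DEBUG'}).run()
class _ExampleComponent(LoggerProperty):
    def __init__(self, logger=None):
        super().__init__()
        self.logger = logger
    def run(self):
        self.logger.info('example component running')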
| 38.097156 | 120 | 0.605648 |