Dataset schema (one row per source file; ⌀ marks columns that may be null):

| column | dtype | values / lengths |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
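As a minimal sketch of how rows with this schema might be consumed, assuming the dump has been materialized as local Parquet files (the `data/*.parquet` glob is a placeholder, not part of this dump), one could filter for permissively licensed Python files with the Hugging Face `datasets` library:

```python
# Minimal sketch: load and filter rows that follow the schema above.
# The "data/*.parquet" path is an assumption, not taken from this dump.
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data/*.parquet", split="train")

def is_permissive_python(row):
    # Keep Python files whose max-stars repo carries an Apache-2.0 or MIT license.
    return row["ext"] == "py" and any(
        lic in ("Apache-2.0", "MIT") for lic in row["max_stars_repo_licenses"]
    )

python_rows = ds.filter(is_permissive_python)
print(python_rows.num_rows, "rows kept")
```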
hexsha: 9ddb0735682f82abe1e460ba5b03219087d0df7f | size: 578 | ext: py | lang: Python
max_stars_repo: gheyret/EfficientConformer (path models/__init__.py, head b28a0aaa3b182f72abaccbeb12df0402adf96097, licenses ["Apache-2.0"]) | max_stars_count: 101 | stars events: 2021-09-06T03:52:37.000Z to 2022-03-17T07:57:43.000Z
max_issues_repo: entn-at/EfficientConformer (path models/__init__.py, head d75c2dc74c44941ca34c62b4196488d34a4fb3a0, licenses ["Apache-2.0"]) | max_issues_count: 11 | issues events: 2021-09-13T05:52:33.000Z to 2022-02-18T04:40:50.000Z
max_forks_repo: entn-at/EfficientConformer (path models/__init__.py, head d75c2dc74c44941ca34c62b4196488d34a4fb3a0, licenses ["Apache-2.0"]) | max_forks_count: 17 | forks events: 2021-09-06T01:34:28.000Z to 2022-03-24T06:36:46.000Z
content:
# Copyright 2021, Maxime Burchi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
avg_line_length: 44.461538 | max_line_length: 74 | alphanum_fraction: 0.762976
hexsha: ed83fa9e3cd212e66ac48ece23ab4795bb4b9705 | size: 3,512 | ext: py | lang: Python
max_stars_repo: test-wiz-sec/pulumi-azure-nextgen (path sdk/python/pulumi_azure_nextgen/cache/v20180301/get_firewall_rule.py, head 20a695af0d020b34b0f1c336e1b69702755174cc, licenses ["Apache-2.0"]) | max_stars_count: null | stars events: null
max_issues_repo: test-wiz-sec/pulumi-azure-nextgen (path sdk/python/pulumi_azure_nextgen/cache/v20180301/get_firewall_rule.py, head 20a695af0d020b34b0f1c336e1b69702755174cc, licenses ["Apache-2.0"]) | max_issues_count: null | issues events: null
max_forks_repo: test-wiz-sec/pulumi-azure-nextgen (path sdk/python/pulumi_azure_nextgen/cache/v20180301/get_firewall_rule.py, head 20a695af0d020b34b0f1c336e1b69702755174cc, licenses ["Apache-2.0"]) | max_forks_count: null | forks events: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetFirewallRuleResult',
'AwaitableGetFirewallRuleResult',
'get_firewall_rule',
]
@pulumi.output_type
class GetFirewallRuleResult:
"""
A firewall rule on a redis cache has a name, and describes a contiguous range of IP addresses permitted to connect
"""
def __init__(__self__, end_ip=None, name=None, start_ip=None, type=None):
if end_ip and not isinstance(end_ip, str):
raise TypeError("Expected argument 'end_ip' to be a str")
pulumi.set(__self__, "end_ip", end_ip)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if start_ip and not isinstance(start_ip, str):
raise TypeError("Expected argument 'start_ip' to be a str")
pulumi.set(__self__, "start_ip", start_ip)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endIP")
def end_ip(self) -> str:
"""
highest IP address included in the range
"""
return pulumi.get(self, "end_ip")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startIP")
def start_ip(self) -> str:
"""
lowest IP address included in the range
"""
return pulumi.get(self, "start_ip")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetFirewallRuleResult(GetFirewallRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFirewallRuleResult(
end_ip=self.end_ip,
name=self.name,
start_ip=self.start_ip,
type=self.type)
def get_firewall_rule(cache_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
rule_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallRuleResult:
"""
Use this data source to access information about an existing resource.
:param str cache_name: The name of the Redis cache.
:param str resource_group_name: The name of the resource group.
:param str rule_name: The name of the firewall rule.
"""
__args__ = dict()
__args__['cacheName'] = cache_name
__args__['resourceGroupName'] = resource_group_name
__args__['ruleName'] = rule_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:cache/v20180301:getFirewallRule', __args__, opts=opts, typ=GetFirewallRuleResult).value
return AwaitableGetFirewallRuleResult(
end_ip=__ret__.end_ip,
name=__ret__.name,
start_ip=__ret__.start_ip,
type=__ret__.type)
avg_line_length: 32.82243 | max_line_length: 138 | alphanum_fraction: 0.639522
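A minimal usage sketch for the generated `get_firewall_rule` data source above, assuming it runs inside a Pulumi program and that the package re-exports the function from `pulumi_azure_nextgen.cache.v20180301` (as generated Pulumi SDKs usually do); the cache, resource group, and rule names are placeholders:

```python
# Hypothetical Pulumi program snippet; resource names below are placeholders.
import pulumi
from pulumi_azure_nextgen.cache.v20180301 import get_firewall_rule

rule = get_firewall_rule(
    cache_name="my-redis-cache",
    resource_group_name="my-resource-group",
    rule_name="allow-office",
)

# Expose the permitted IP range as stack outputs.
pulumi.export("start_ip", rule.start_ip)
pulumi.export("end_ip", rule.end_ip)
```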
hexsha: a621aa65645c9af8357857baf67d28345fef1cc6 | size: 3,740 | ext: py | lang: Python
max_stars_repo: MesserschmittX/hacs-nicehash-excavator (path custom_components/nicehash_excavator/mining_rig.py, head c8be5963fe05f4f3fc793a770d30fb2575d3a163, licenses ["MIT"]) | max_stars_count: 2 | stars events: 2022-02-05T04:56:29.000Z to 2022-02-07T03:54:26.000Z
max_issues_repo: MesserschmittX/hacs-nicehash-excavator (path custom_components/nicehash_excavator/mining_rig.py, head c8be5963fe05f4f3fc793a770d30fb2575d3a163, licenses ["MIT"]) | max_issues_count: 1 | issues events: 2022-02-22T13:42:13.000Z to 2022-03-15T20:39:11.000Z
max_forks_repo: MesserschmittX/ha-nicehash-excavator-monitor (path custom_components/nicehash_excavator/mining_rig.py, head c8be5963fe05f4f3fc793a770d30fb2575d3a163, licenses ["MIT"]) | max_forks_count: null | forks events: null
content:
"""A MiningRig that connects several devices."""
from __future__ import annotations
import datetime
import homeassistant
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import Callable, HomeAssistant
from .const import (
CONFIG_ENABLE_DEBUG_LOGGING,
CONFIG_HOST_ADDRESS,
CONFIG_HOST_PORT,
CONFIG_NAME,
CONFIG_UPDATE_INTERVAL,
)
from .data_containers import Algorithm, GraphicsCard, Worker
from .excavator import ExcavatorAPI
class MiningRig:
"""The Rig containing devices"""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry) -> None:
"""Init MiningRig."""
self._hass = hass
self._name = config_entry.data[CONFIG_NAME]
self._id = config_entry.data[CONFIG_NAME].lower()
try:
self._enable_debug_logging = config_entry.data[CONFIG_ENABLE_DEBUG_LOGGING]
except KeyError:
self._enable_debug_logging = False
self._api = ExcavatorAPI(
config_entry.data[CONFIG_HOST_ADDRESS],
config_entry.data[CONFIG_HOST_PORT],
self._enable_debug_logging,
)
self.algorithms = {}
self.devices = {}
self.workers = {}
self.online = True
self.info = None
self._callbacks = set()
self._remove_update_listener = None
update_interval = config_entry.data.get(CONFIG_UPDATE_INTERVAL)
self.set_update_interval(hass, update_interval)
@property
def mining_rig_id(self) -> str:
"""ID for MiningRig."""
return self._id
async def test_connection(self) -> bool:
"""Test connectivity to the MiningRig."""
self.online = await self._api.test_connection()
return self.online
def register_callback(self, callback: Callable[[], None]) -> None:
"""Register callback, called when MiningRig updates."""
self._callbacks.add(callback)
def remove_callback(self, callback: Callable[[], None]) -> None:
"""Remove previously registered callback."""
self._callbacks.discard(callback)
async def update(self, event=None) -> None:
"""Update MiningRig via Excavator API."""
self.algorithms = await self._api.get_algorithms()
self.devices = await self._api.get_devices()
self.workers = await self._api.get_workers()
self.info = await self._api.get_rig_info()
if self.info is None:
self.online = False
else:
self.online = True
await self.publish_updates()
async def publish_updates(self) -> None:
"""Schedule call all registered callbacks."""
for callback in self._callbacks:
callback()
def set_update_interval(self, hass: HomeAssistant, update_interval: int) -> None:
"""Set new update interval."""
if self._remove_update_listener:
self._remove_update_listener()
self._remove_update_listener = (
homeassistant.helpers.event.async_track_time_interval(
hass, self.update, datetime.timedelta(seconds=update_interval)
)
)
def get_algorithm(self, algorithm_id) -> Algorithm | None:
"""Get algorithm by id."""
if algorithm_id in self.algorithms:
return self.algorithms[algorithm_id]
return None
def get_device(self, device_id) -> GraphicsCard | None:
"""Get device by id."""
if device_id in self.devices:
return self.devices[device_id]
return None
def get_worker(self, worker_id) -> Worker | None:
"""Get worker by id."""
if worker_id in self.workers:
return self.workers[worker_id]
return None
avg_line_length: 33.392857 | max_line_length: 87 | alphanum_fraction: 0.648128
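A hypothetical sketch of how the `MiningRig` above could be wired into a Home Assistant integration's setup entry point; it only uses methods defined in the class, but the surrounding `async_setup_entry` function and its return convention are assumptions, not taken from this repository:

```python
# Hypothetical setup sketch (not part of the original integration).
async def async_setup_entry(hass, config_entry) -> bool:
    rig = MiningRig(hass, config_entry)

    if not await rig.test_connection():
        # Excavator API not reachable; report setup failure.
        return False

    def on_update() -> None:
        # Runs whenever MiningRig.publish_updates() fires the callbacks.
        print("algorithms:", len(rig.algorithms), "devices:", len(rig.devices))

    rig.register_callback(on_update)
    await rig.update()  # initial refresh; later refreshes follow the configured interval
    return True
```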
hexsha: 891f79b460fd8459e8d99bcad48dfe652a95daae | size: 52,603 | ext: py | lang: Python
max_stars_repo: SEL-Columbia/commcare-hq (path corehq/apps/hqwebapp/models.py, head 992ee34a679c37f063f86200e6df5a197d5e3ff6, licenses ["BSD-3-Clause"]) | max_stars_count: 1 | stars events: 2015-02-10T23:26:39.000Z to 2015-02-10T23:26:39.000Z
max_issues_repo: SEL-Columbia/commcare-hq (path corehq/apps/hqwebapp/models.py, head 992ee34a679c37f063f86200e6df5a197d5e3ff6, licenses ["BSD-3-Clause"]) | max_issues_count: null | issues events: null
max_forks_repo: SEL-Columbia/commcare-hq (path corehq/apps/hqwebapp/models.py, head 992ee34a679c37f063f86200e6df5a197d5e3ff6, licenses ["BSD-3-Clause"]) | max_forks_count: null | forks events: null
content:
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe, mark_for_escaping
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, ugettext_lazy
from corehq import toggles, privileges
from corehq.apps.accounting.dispatcher import AccountingAdminInterfaceDispatcher
from corehq.apps.accounting.models import BillingAccountAdmin, Invoice
from corehq.apps.accounting.utils import is_accounting_admin
from corehq.apps.domain.utils import get_adm_enabled_domains
from corehq.apps.indicators.dispatcher import IndicatorAdminInterfaceDispatcher
from corehq.apps.indicators.utils import get_indicator_domains
from corehq.apps.reminders.util import can_use_survey_reminders
from corehq.apps.smsbillables.dispatcher import SMSAdminInterfaceDispatcher
from django_prbac.exceptions import PermissionDenied
from django_prbac.models import Role, UserRole
from django_prbac.utils import ensure_request_has_privilege
from dimagi.utils.couch.database import get_db
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.reports.dispatcher import (ProjectReportDispatcher,
CustomProjectReportDispatcher)
from corehq.apps.adm.dispatcher import (ADMAdminInterfaceDispatcher,
ADMSectionDispatcher)
from corehq.apps.announcements.dispatcher import (
HQAnnouncementAdminInterfaceDispatcher)
from corehq.toggles import IS_DEVELOPER
def format_submenu_context(title, url=None, html=None,
is_header=False, is_divider=False, data_id=None):
return {
'title': title,
'url': url,
'html': html,
'is_header': is_header,
'is_divider': is_divider,
'data_id': data_id,
}
def format_second_level_context(title, url, menu):
return {
'title': title,
'url': url,
'is_second_level': True,
'submenu': menu,
}
class UITab(object):
title = None
view = None
subtab_classes = None
dispatcher = None
def __init__(self, request, current_url_name, domain=None, couch_user=None,
project=None, org=None):
if self.subtab_classes:
self.subtabs = [cls(request, current_url_name, domain=domain,
couch_user=couch_user, project=project,
org=org)
for cls in self.subtab_classes]
else:
self.subtabs = None
self.domain = domain
self.couch_user = couch_user
self.project = project
self.org = org
# This should not be considered as part of the subclass API unless it
# is necessary. Try to add new explicit parameters instead.
self._request = request
self._current_url_name = current_url_name
@property
def dropdown_items(self):
# todo: add default implementation which looks at sidebar_items and
# sees which ones have is_dropdown_visible or something like that.
# Also make it work for tabs with subtabs.
return []
@property
@memoized
def sidebar_items(self):
if self.dispatcher:
context = {
'request': self._request,
'domain': self.domain,
}
return self.dispatcher.navigation_sections(context)
else:
return []
@property
def is_viewable(self):
"""
Whether the tab should be displayed. Subclass implementations can skip
checking whether domain, couch_user, or project is not None before
accessing an attribute of them -- this property is accessed in
real_is_viewable and wrapped in a try block that returns False in the
case of an AttributeError for any of those variables.
"""
raise NotImplementedError()
@property
@memoized
def real_is_viewable(self):
if self.subtabs:
return any(st.real_is_viewable for st in self.subtabs)
else:
try:
return self.is_viewable
except AttributeError:
return False
@property
@memoized
def url(self):
try:
if self.domain:
return reverse(self.view, args=[self.domain])
if self.org:
return reverse(self.view, args=[self.org.name])
except Exception:
pass
try:
return reverse(self.view)
except Exception:
return None
@property
def is_active_shortcircuit(self):
return None
@property
def is_active_fast(self):
shortcircuit = self.is_active_shortcircuit
if shortcircuit is not None:
return shortcircuit
request_path = self._request.get_full_path()
return self.url and request_path.startswith(self.url)
@property
@memoized
def is_active(self):
shortcircuit = self.is_active_shortcircuit
if shortcircuit is not None:
return shortcircuit
request_path = self._request.get_full_path()
if self.urls:
if (any(request_path.startswith(url) for url in self.urls) or
self._current_url_name in self.subpage_url_names):
return True
elif self.subtabs and any(st.is_active for st in self.subtabs):
return True
@property
@memoized
def urls(self):
urls = [self.url] if self.url else []
if self.subtabs:
for st in self.subtabs:
urls.extend(st.urls)
try:
for name, section in self.sidebar_items:
urls.extend(item['url'] for item in section)
except Exception:
# tried to get urls for another tab on a page that doesn't provide
# the necessary couch_user, domain, project, etc. value
pass
return urls
@property
@memoized
def subpage_url_names(self):
"""
List of all url names of subpages of sidebar items that get
displayed only when you're on that subpage.
"""
names = []
if self.subtabs:
for st in self.subtabs:
names.extend(st.subpage_url_names)
try:
for name, section in self.sidebar_items:
names.extend(subpage['urlname']
for item in section
for subpage in item.get('subpages', []))
except Exception:
pass
return names
@property
def css_id(self):
return self.__class__.__name__
class ProjectReportsTab(UITab):
title = ugettext_noop("Project Reports")
view = "corehq.apps.reports.views.default"
@property
def is_active_shortcircuit(self):
# HACK. We need a more overarching way to avoid doing things this way
if 'reports/adm' in self._request.get_full_path():
return False
@property
def is_viewable(self):
return (self.domain and self.project and not self.project.is_snapshot and
(self.couch_user.can_view_reports() or
self.couch_user.get_viewable_reports()))
@property
def sidebar_items(self):
context = {
'request': self._request,
'domain': self.domain,
}
tools = [(_("Tools"), [
{'title': _('My Saved Reports'),
'url': reverse('saved_reports', args=[self.domain]),
'icon': 'icon-tasks'}
])]
project_reports = ProjectReportDispatcher.navigation_sections(context)
custom_reports = CustomProjectReportDispatcher.navigation_sections(context)
return tools + project_reports + custom_reports
class ADMReportsTab(UITab):
title = ugettext_noop("Active Data Management")
view = "corehq.apps.adm.views.default_adm_report"
dispatcher = ADMSectionDispatcher
@property
def is_viewable(self):
if not self.project or self.project.commtrack_enabled:
return False
adm_enabled_projects = get_adm_enabled_domains()
return (not self.project.is_snapshot and
self.domain in adm_enabled_projects and
(self.couch_user.can_view_reports() or
self.couch_user.get_viewable_reports()))
class IndicatorAdminTab(UITab):
title = ugettext_noop("Administer Indicators")
view = "corehq.apps.indicators.views.default_admin"
dispatcher = IndicatorAdminInterfaceDispatcher
@property
def is_viewable(self):
indicator_enabled_projects = get_indicator_domains()
return self.couch_user.can_edit_data() and self.domain in indicator_enabled_projects
class DashboardTab(UITab):
title = ugettext_noop("Dashboard")
view = 'corehq.apps.dashboard.views.dashboard_default'
@property
def is_viewable(self):
return (self.couch_user
and toggles.DASHBOARD_PREVIEW.enabled(self.couch_user.username))
class ReportsTab(UITab):
title = ugettext_noop("Reports")
view = "corehq.apps.reports.views.saved_reports"
subtab_classes = (ProjectReportsTab, ADMReportsTab, IndicatorAdminTab)
class ProjectInfoTab(UITab):
title = ugettext_noop("Project Info")
view = "corehq.apps.appstore.views.project_info"
@property
def is_viewable(self):
return self.project and self.project.is_snapshot
class CommTrackSetupTab(UITab):
title = ugettext_noop("Setup")
view = "corehq.apps.commtrack.views.default"
@property
def dropdown_items(self):
# circular import
from corehq.apps.commtrack.views import (
CommTrackSettingsView,
ProductListView,
DefaultConsumptionView,
ProgramListView,
SMSSettingsView,
)
from corehq.apps.locations.views import (
LocationsListView,
LocationSettingsView,
)
dropdown_items = [(_(view.page_title), view) for view in (
ProductListView,
LocationsListView,
LocationSettingsView,
ProgramListView,
SMSSettingsView,
DefaultConsumptionView,
CommTrackSettingsView,
)
]
return [
format_submenu_context(
item[0],
url=reverse(item[1].urlname, args=[self.domain])
) for item in dropdown_items
]
@property
def is_viewable(self):
return self.project.commtrack_enabled and self.couch_user.is_domain_admin()
@property
def sidebar_items(self):
# circular import
from corehq.apps.commtrack.views import (
CommTrackSettingsView,
ProductListView,
NewProductView,
EditProductView,
DefaultConsumptionView,
ProgramListView,
NewProgramView,
EditProgramView,
SMSSettingsView,
ILSConfigView,
)
from corehq.apps.locations.views import (
LocationsListView,
NewLocationView,
EditLocationView,
FacilitySyncView,
LocationImportView,
LocationImportStatusView,
LocationSettingsView,
)
items = []
items.append([_('CommTrack Setup'), [
# products
{
'title': ProductListView.page_title,
'url': reverse(ProductListView.urlname, args=[self.domain]),
'subpages': [
{
'title': NewProductView.page_title,
'urlname': NewProductView.urlname,
},
{
'title': EditProductView.page_title,
'urlname': EditProductView.urlname,
},
]
},
# locations
{
'title': LocationsListView.page_title,
'url': reverse(LocationsListView.urlname, args=[self.domain]),
'subpages': [
{
'title': NewLocationView.page_title,
'urlname': NewLocationView.urlname,
},
{
'title': EditLocationView.page_title,
'urlname': EditLocationView.urlname,
},
{
'title': LocationImportView.page_title,
'urlname': LocationImportView.urlname,
},
{
'title': LocationImportStatusView.page_title,
'urlname': LocationImportStatusView.urlname,
},
]
},
# locations (advanced)
{
'title': LocationSettingsView.page_title,
'url': reverse(LocationSettingsView.urlname, args=[self.domain]),
},
# programs
{
'title': ProgramListView.page_title,
'url': reverse(ProgramListView.urlname, args=[self.domain]),
'subpages': [
{
'title': NewProgramView.page_title,
'urlname': NewProgramView.urlname,
},
{
'title': EditProgramView.page_title,
'urlname': EditProgramView.urlname,
},
]
},
# sms
{
'title': SMSSettingsView.page_title,
'url': reverse(SMSSettingsView.urlname, args=[self.domain]),
},
# consumption
{
'title': DefaultConsumptionView.page_title,
'url': reverse(DefaultConsumptionView.urlname, args=[self.domain]),
},
# settings
{
'title': CommTrackSettingsView.page_title,
'url': reverse(CommTrackSettingsView.urlname, args=[self.domain]),
},
# external sync
{
'title': FacilitySyncView.page_title,
'url': reverse(FacilitySyncView.urlname, args=[self.domain]),
},
]])
if self.couch_user and (self.couch_user.is_superuser or IS_DEVELOPER.enabled(self.couch_user.username)):
items[0][1].append({
'title': ILSConfigView.page_title,
'url': reverse(ILSConfigView.urlname, args=[self.domain]),
})
return items
class ProjectDataTab(UITab):
title = ugettext_noop("Data")
view = "corehq.apps.data_interfaces.views.default"
@property
@memoized
def can_edit_commcare_data(self):
return self.couch_user.can_edit_data()
@property
@memoized
def can_export_data(self):
return self.project and not self.project.is_snapshot and self.couch_user.can_export_data()
@property
def is_viewable(self):
return self.domain and (self.can_edit_commcare_data or self.can_export_data)
@property
def sidebar_items(self):
items = []
context = {
'request': self._request,
'domain': self.domain,
}
if self.can_export_data:
from corehq.apps.data_interfaces.dispatcher import DataInterfaceDispatcher
items.extend(DataInterfaceDispatcher.navigation_sections(context))
if self.can_edit_commcare_data:
from corehq.apps.data_interfaces.dispatcher import EditDataInterfaceDispatcher
edit_section = EditDataInterfaceDispatcher.navigation_sections(context)
from corehq.apps.data_interfaces.views import CaseGroupListView, CaseGroupCaseManagementView
edit_section[0][1].append({
'title': CaseGroupListView.page_title,
'url': reverse(CaseGroupListView.urlname, args=[self.domain]),
'subpages': [
{
'title': CaseGroupCaseManagementView.page_title,
'urlname': CaseGroupCaseManagementView.urlname,
}
]
})
items.extend(edit_section)
return items
class ApplicationsTab(UITab):
view = "corehq.apps.app_manager.views.default"
@property
def title(self):
if self.project.commconnect_enabled:
return _("Surveys")
else:
return _("Applications")
@classmethod
def make_app_title(cls, app_name, doc_type):
return mark_safe("%s%s" % (
mark_for_escaping(app_name or '(Untitled)'),
mark_for_escaping(' (Remote)' if doc_type == 'RemoteApp' else ''),
))
@property
def dropdown_items(self):
# todo async refresh submenu when on the applications page and you change the application name
key = [self.domain]
apps = get_db().view('app_manager/applications_brief',
reduce=False,
startkey=key,
endkey=key+[{}],
#stale=settings.COUCH_STALE_QUERY,
).all()
submenu_context = []
if not apps:
return submenu_context
submenu_context.append(format_submenu_context(_('My Applications'), is_header=True))
for app in apps:
app_info = app['value']
if app_info:
app_id = app_info['_id']
app_name = app_info['name']
app_doc_type = app_info['doc_type']
url = reverse('view_app', args=[self.domain, app_id]) if self.couch_user.can_edit_apps() \
else reverse('release_manager', args=[self.domain, app_id])
app_title = self.make_app_title(app_name, app_doc_type)
submenu_context.append(format_submenu_context(
app_title,
url=url,
data_id=app_id,
))
if self.couch_user.can_edit_apps():
submenu_context.append(format_submenu_context(None, is_divider=True))
newapp_options = [
format_submenu_context(None, html=self._new_app_link(_('Blank Application'))),
format_submenu_context(None, html=self._new_app_link(_('RemoteApp (Advanced Users Only)'),
is_remote=True)),
]
newapp_options.append(format_submenu_context(_('Visit CommCare Exchange to copy existing app...'),
url=reverse('appstore')))
submenu_context.append(format_second_level_context(
_('New Application...'),
'#',
newapp_options
))
return submenu_context
def _new_app_link(self, title, is_remote=False):
template = "app_manager/partials/new_app_link.html"
return mark_safe(render_to_string(template, {
'domain': self.domain,
'is_remote': is_remote,
'action_text': title,
}))
@property
def is_viewable(self):
couch_user = self.couch_user
return (self.domain and couch_user and
(couch_user.is_web_user() or couch_user.can_edit_apps()) and
(couch_user.is_member_of(self.domain) or couch_user.is_superuser))
class CloudcareTab(UITab):
title = ugettext_noop("CloudCare")
view = "corehq.apps.cloudcare.views.default"
@property
def is_viewable(self):
try:
ensure_request_has_privilege(self._request, privileges.CLOUDCARE)
except PermissionDenied:
return False
return (self.domain
and (self.couch_user.can_edit_data() or self.couch_user.is_commcare_user())
and not self.project.commconnect_enabled)
class MessagingTab(UITab):
title = ugettext_noop("Messaging")
view = "corehq.apps.sms.views.default"
@property
def is_viewable(self):
return (self.can_access_reminders or self.can_access_sms) and (
self.project and not (self.project.is_snapshot or
self.couch_user.is_commcare_user())
) and self.couch_user.can_edit_data()
@property
@memoized
def can_access_sms(self):
try:
ensure_request_has_privilege(self._request, privileges.OUTBOUND_SMS)
except PermissionDenied:
return False
return True
@property
@memoized
def can_access_reminders(self):
try:
ensure_request_has_privilege(self._request, privileges.REMINDERS_FRAMEWORK)
return True
except PermissionDenied:
return False
@property
def sidebar_items(self):
from corehq.apps.reports.standard.sms import MessageLogReport
def reminder_subtitle(form=None, **context):
return form['nickname'].value
def keyword_subtitle(keyword=None, **context):
return keyword.keyword
reminders_urls = []
if self.can_access_reminders:
if toggles.REMINDERS_UI_PREVIEW.enabled(self.couch_user.username):
from corehq.apps.reminders.views import (
EditScheduledReminderView,
CreateScheduledReminderView,
RemindersListView,
)
reminders_list_url = reverse(RemindersListView.urlname, args=[self.domain])
edit_reminder_urlname = EditScheduledReminderView.urlname
new_reminder_urlname = CreateScheduledReminderView.urlname
else:
reminders_list_url = reverse('list_reminders', args=[self.domain])
edit_reminder_urlname = 'edit_complex'
new_reminder_urlname = 'add_complex_reminder_schedule'
reminders_urls.extend([
{
'title': _("Reminders"),
'url': reminders_list_url,
'subpages': [
{
'title': reminder_subtitle,
'urlname': edit_reminder_urlname
},
{
'title': _("Schedule Reminder"),
'urlname': new_reminder_urlname,
},
{
'title': _("Schedule Multi Event Reminder"),
'urlname': 'create_complex_reminder_schedule',
},
],
},
{
'title': _("Reminder Calendar"),
'url': reverse('scheduled_reminders', args=[self.domain])
},
])
can_use_survey = can_use_survey_reminders(self._request)
if can_use_survey:
from corehq.apps.reminders.views import (
KeywordsListView, AddNormalKeywordView,
AddStructuredKeywordView, EditNormalKeywordView,
EditStructuredKeywordView,
)
if toggles.REMINDERS_UI_PREVIEW.enabled(self.couch_user.username):
keyword_list_url = reverse(KeywordsListView.urlname, args=[self.domain])
else:
keyword_list_url = reverse('manage_keywords', args=[self.domain])
reminders_urls.append({
'title': _("Keywords"),
'url': keyword_list_url,
'subpages': [
{
'title': keyword_subtitle,
'urlname': 'edit_keyword'
},
{
'title': _("New Keyword"),
'urlname': 'add_keyword',
},
{
'title': AddNormalKeywordView.page_title,
'urlname': AddNormalKeywordView.urlname,
},
{
'title': AddStructuredKeywordView.page_title,
'urlname': AddStructuredKeywordView.urlname,
},
{
'title': EditNormalKeywordView.page_title,
'urlname': EditNormalKeywordView.urlname,
},
{
'title': EditStructuredKeywordView.page_title,
'urlname': EditStructuredKeywordView.urlname,
},
],
})
if self.can_access_reminders:
reminders_urls.append({
'title': _("Reminders in Error"),
'url': reverse('reminders_in_error', args=[self.domain])
})
items = []
messages_urls = []
if self.can_access_sms:
messages_urls.extend([
{
'title': _('Compose SMS Message'),
'url': reverse('sms_compose_message', args=[self.domain])
},
])
if self.can_access_reminders:
messages_urls.extend([
{
'title': _("Broadcast Messages"),
'url': reverse('one_time_reminders', args=[self.domain]),
'subpages': [
{
'title': _("Edit Broadcast"),
'urlname': 'edit_one_time_reminder'
},
{
'title': _("New Broadcast"),
'urlname': 'add_one_time_reminder'
},
{
'title': _("New Broadcast"),
'urlname': 'copy_one_time_reminder'
},
]
},
])
if self.can_access_sms:
messages_urls.extend([
{
'title': _('Message Log'),
'url': MessageLogReport.get_url(domain=self.domain)
},
])
if messages_urls:
items.append((_("Messages"), messages_urls))
if reminders_urls:
items.append((_("Data Collection and Reminders"), reminders_urls))
if self.project.commtrack_enabled:
from corehq.apps.sms.views import SubscribeSMSView
items.append(
(_("CommTrack"), [
{'title': ugettext_lazy("Subscribe to SMS Reports"),
'url': reverse(SubscribeSMSView.urlname, args=[self.domain])},])
)
if self.couch_user.is_previewer():
items[0][1].append(
{'title': _('Chat'),
'url': reverse('chat_contacts', args=[self.domain])}
)
if self.project.survey_management_enabled and can_use_survey:
def sample_title(form=None, **context):
return form['name'].value
def survey_title(form=None, **context):
return form['name'].value
items.append(
(_("Survey Management"), [
{'title': _("Samples"),
'url': reverse('sample_list', args=[self.domain]),
'subpages': [
{'title': sample_title,
'urlname': 'edit_sample'},
{'title': _("New Sample"),
'urlname': 'add_sample'},
]},
{'title': _("Surveys"),
'url': reverse('survey_list', args=[self.domain]),
'subpages': [
{'title': survey_title,
'urlname': 'edit_survey'},
{'title': _("New Survey"),
'urlname': 'add_survey'},
]},
])
)
settings_pages = []
if self.can_access_sms:
from corehq.apps.sms.views import (
DomainSmsGatewayListView, AddDomainGatewayView,
EditDomainGatewayView,
)
if toggles.REMINDERS_UI_PREVIEW.enabled(self.couch_user.username):
sms_connectivity_url = reverse(
DomainSmsGatewayListView.urlname, args=[self.domain]
)
else:
sms_connectivity_url = reverse(
'list_domain_backends', args=[self.domain]
)
settings_pages.append({
'title': _('SMS Connectivity'),
'url': sms_connectivity_url,
'subpages': [
{
'title': _('Add Connection'),
'urlname': 'add_domain_backend'
},
{
'title': _("Add Connection"),
'urlname': AddDomainGatewayView.urlname,
},
{
'title': _('Edit Connection'),
'urlname': 'edit_domain_backend'
},
{
'title': _("Edit Connection"),
'urlname': EditDomainGatewayView.urlname,
},
],
})
if self.couch_user.is_superuser or self.couch_user.is_domain_admin(self.domain):
settings_pages.extend([
{'title': ugettext_lazy("General Settings"),
'url': reverse('sms_settings', args=[self.domain])},
{'title': ugettext_lazy("Languages"),
'url': reverse('sms_languages', args=[self.domain])},
])
if settings_pages:
items.append((_("Settings"), settings_pages))
return items
@property
def dropdown_items(self):
return []
class ProjectUsersTab(UITab):
title = ugettext_noop("Users")
view = "users_default"
@property
def dropdown_items(self):
return []
@property
def is_viewable(self):
return self.domain and (self.couch_user.can_edit_commcare_users() or
self.couch_user.can_edit_web_users())
@property
def is_active_shortcircuit(self):
if not self.domain:
return False
@property
@memoized
def is_active(self):
cloudcare_settings_url = reverse('cloudcare_app_settings', args=[self.domain])
full_path = self._request.get_full_path()
return (super(ProjectUsersTab, self).is_active
or full_path.startswith(cloudcare_settings_url))
@property
def can_view_cloudcare(self):
try:
ensure_request_has_privilege(self._request, privileges.CLOUDCARE)
except PermissionDenied:
return False
return self.couch_user.is_domain_admin()
@property
def sidebar_items(self):
items = []
if self.couch_user.can_edit_commcare_users():
def commcare_username(request=None, couch_user=None, **context):
if (couch_user.user_id != request.couch_user.user_id or couch_user.is_commcare_user()):
username = couch_user.username_in_report
if couch_user.is_deleted():
username += " (%s)" % _("Deleted")
return mark_safe(username)
else:
return None
from corehq.apps.users.views.mobile import EditCommCareUserView, ConfirmBillingAccountForExtraUsersView
mobile_users_menu = [
{'title': _('Mobile Workers'),
'url': reverse('commcare_users', args=[self.domain]),
'description': _("Create and manage users for CommCare and CloudCare."),
'subpages': [
{'title': commcare_username,
'urlname': EditCommCareUserView.urlname},
{'title': _('New Mobile Worker'),
'urlname': 'add_commcare_account'},
{'title': _('Bulk Upload'),
'urlname': 'upload_commcare_users'},
{'title': ConfirmBillingAccountForExtraUsersView.page_title,
'urlname': ConfirmBillingAccountForExtraUsersView.urlname},
]},
{'title': _('Groups'),
'url': reverse('all_groups', args=[self.domain]),
'description': _("Create and manage reporting and case sharing groups for Mobile Workers."),
'subpages': [
{'title': lambda **context: (
"%s %s" % (_("Editing"), context['group'].name)),
'urlname': 'group_members'},
{'title': _('Membership Info'),
'urlname': 'group_membership'}
]}
]
if self.can_view_cloudcare:
mobile_users_menu.append({
'title': _('CloudCare Permissions'),
'url': reverse('cloudcare_app_settings',
args=[self.domain])
})
items.append((_('Application Users'), mobile_users_menu))
if self.couch_user.can_edit_web_users():
def web_username(request=None, couch_user=None, **context):
if (couch_user.user_id != request.couch_user.user_id or
not couch_user.is_commcare_user()):
username = couch_user.human_friendly_name
if couch_user.is_deleted():
username += " (%s)" % _("Deleted")
return mark_safe(username)
else:
return None
from corehq.apps.users.views import (EditWebUserView, EditMyAccountDomainView, ListWebUsersView)
items.append((_('Project Users'), [
{'title': ListWebUsersView.page_title,
'url': reverse(ListWebUsersView.urlname, args=[self.domain]),
'description': _("Grant other CommCare HQ users access to your project and manage user roles."),
'subpages': [
{
'title': _("Invite Web User"),
'urlname': 'invite_web_user'
},
{
'title': web_username,
'urlname': EditWebUserView.urlname
},
{
'title': _('My Information'),
'urlname': EditMyAccountDomainView.urlname
}
]}
]))
return items
class ProjectSettingsTab(UITab):
title = ugettext_noop("Project Settings")
view = 'domain_settings_default'
@property
def dropdown_items(self):
return []
@property
def is_viewable(self):
return self.domain and self.couch_user and self.couch_user.is_domain_admin(self.domain)
@property
def sidebar_items(self):
items = []
user_is_admin = self.couch_user.is_domain_admin(self.domain)
project_info = []
if user_is_admin:
from corehq.apps.domain.views import EditBasicProjectInfoView, EditDeploymentProjectInfoView
project_info.extend([
{
'title': _(EditBasicProjectInfoView.page_title),
'url': reverse(EditBasicProjectInfoView.urlname, args=[self.domain])
},
{
'title': _(EditDeploymentProjectInfoView.page_title),
'url': reverse(EditDeploymentProjectInfoView.urlname, args=[self.domain])
}
])
from corehq.apps.domain.views import EditMyProjectSettingsView
project_info.append({
'title': _(EditMyProjectSettingsView.page_title),
'url': reverse(EditMyProjectSettingsView.urlname, args=[self.domain])
})
can_view_orgs = (user_is_admin
and self.project and self.project.organization)
if can_view_orgs:
try:
ensure_request_has_privilege(self._request, privileges.CROSS_PROJECT_REPORTS)
except PermissionDenied:
can_view_orgs = False
if can_view_orgs:
from corehq.apps.domain.views import OrgSettingsView
project_info.append({
'title': _(OrgSettingsView.page_title),
'url': reverse(OrgSettingsView.urlname, args=[self.domain])
})
items.append((_('Project Information'), project_info))
if user_is_admin:
administration = [
{
'title': _('CommCare Exchange'),
'url': reverse('domain_snapshot_settings', args=[self.domain])
},
{
'title': _('Multimedia Sharing'),
'url': reverse('domain_manage_multimedia', args=[self.domain])
}
]
def forward_name(repeater_type=None, **context):
if repeater_type == 'FormRepeater':
return _("Forward Forms")
elif repeater_type == 'ShortFormRepeater':
return _("Forward Form Stubs")
elif repeater_type == 'CaseRepeater':
return _("Forward Cases")
administration.extend([
{'title': _('Data Forwarding'),
'url': reverse('domain_forwarding', args=[self.domain]),
'subpages': [
{
'title': forward_name,
'urlname': 'add_repeater',
},
{
'title': forward_name,
'urlname': 'add_form_repeater',
},
]}
])
administration.append({
'title': _('Feature Previews'),
'url': reverse('feature_previews', args=[self.domain])
})
items.append((_('Project Administration'), administration))
from corehq.apps.users.models import WebUser
if isinstance(self.couch_user, WebUser):
user_is_billing_admin, billing_account = BillingAccountAdmin.get_admin_status_and_account(
self.couch_user, self.domain)
if user_is_billing_admin or self.couch_user.is_superuser:
from corehq.apps.domain.views import (
DomainSubscriptionView, EditExistingBillingAccountView,
DomainBillingStatementsView, ConfirmSubscriptionRenewalView,
)
subscription = [
{
'title': DomainSubscriptionView.page_title,
'url': reverse(DomainSubscriptionView.urlname, args=[self.domain]),
'subpages': [
{
'title': ConfirmSubscriptionRenewalView.page_title,
'urlname': ConfirmSubscriptionRenewalView.urlname,
'url': reverse(ConfirmSubscriptionRenewalView.urlname, args=[self.domain]),
}
]
},
]
if billing_account is not None:
subscription.append(
{
'title': EditExistingBillingAccountView.page_title,
'url': reverse(EditExistingBillingAccountView.urlname, args=[self.domain]),
},
)
if (billing_account is not None
and Invoice.exists_for_domain(self.domain)
):
subscription.append(
{
'title': DomainBillingStatementsView.page_title,
'url': reverse(DomainBillingStatementsView.urlname, args=[self.domain]),
}
)
items.append((_('Subscription'), subscription))
if self.couch_user.is_superuser:
from corehq.apps.domain.views import EditInternalDomainInfoView, EditInternalCalculationsView
internal_admin = [{
'title': _(EditInternalDomainInfoView.page_title),
'url': reverse(EditInternalDomainInfoView.urlname, args=[self.domain])
},
{
'title': _(EditInternalCalculationsView.page_title),
'url': reverse(EditInternalCalculationsView.urlname, args=[self.domain])
}]
items.append((_('Internal Data (Dimagi Only)'), internal_admin))
return items
class MySettingsTab(UITab):
title = ugettext_noop("My Settings")
view = 'default_my_settings'
@property
def dropdown_items(self):
return []
@property
def is_viewable(self):
return self.couch_user is not None
@property
def sidebar_items(self):
from corehq.apps.settings.views import MyAccountSettingsView, MyProjectsList, ChangeMyPasswordView
items = [
(_("Manage My Settings"), (
{
'title': _(MyAccountSettingsView.page_title),
'url': reverse(MyAccountSettingsView.urlname),
},
{
'title': _(MyProjectsList.page_title),
'url': reverse(MyProjectsList.urlname),
},
{
'title': _(ChangeMyPasswordView.page_title),
'url': reverse(ChangeMyPasswordView.urlname),
},
))
]
return items
class AdminReportsTab(UITab):
title = ugettext_noop("Admin Reports")
view = "corehq.apps.hqadmin.views.default"
@property
def sidebar_items(self):
# todo: convert these to dispatcher-style like other reports
if self.couch_user and (not self.couch_user.is_superuser and IS_DEVELOPER.enabled(self.couch_user.username)):
return [
(_('Administrative Reports'), [
{'title': _('System Info'),
'url': reverse('system_info')},
])]
admin_operations = [
{'title': _('View/Update Domain Information'),
'url': reverse('domain_update')},
]
if self.couch_user and self.couch_user.is_staff:
admin_operations.extend([
{'title': _('Mass Email Users'),
'url': reverse('mass_email')},
{'title': _('PillowTop Errors'),
'url': reverse('admin_report_dispatcher', args=('pillow_errors',))},
])
return [
(_('Administrative Reports'), [
{'title': _('Project Space List'),
'url': reverse('admin_report_dispatcher', args=('domains',))},
{'title': _('User List'),
'url': reverse('admin_report_dispatcher', args=('user_list',))},
{'title': _('Application List'),
'url': reverse('admin_report_dispatcher', args=('app_list',))},
{'title': _('Domain Activity Report'),
'url': reverse('domain_activity_report')},
{'title': _('Message Logs Across All Domains'),
'url': reverse('message_log_report')},
{'title': _('Global Statistics'),
'url': reverse('global_report')},
{'title': _('CommCare Versions'),
'url': reverse('commcare_version_report')},
{'title': _('Submissions & Error Statistics per Domain'),
'url': reverse('global_submissions_errors')},
{'title': _('System Info'),
'url': reverse('system_info')},
{'title': _('Mobile User Reports'),
'url': reverse('mobile_user_reports')},
{'title': _('Loadtest Report'),
'url': reverse('loadtest_report')},
]), (_('Administrative Operations'), admin_operations)]
@property
def is_viewable(self):
return self.couch_user and (self.couch_user.is_superuser or IS_DEVELOPER.enabled(self.couch_user.username))
class GlobalADMConfigTab(UITab):
title = ugettext_noop("Global ADM Report Configuration")
view = "corehq.apps.adm.views.default_adm_admin"
dispatcher = ADMAdminInterfaceDispatcher
@property
def is_viewable(self):
return self.couch_user and self.couch_user.is_superuser
class AccountingTab(UITab):
title = ugettext_noop("Accounting")
view = "accounting_default"
dispatcher = AccountingAdminInterfaceDispatcher
@property
def is_viewable(self):
return is_accounting_admin(self._request.user)
@property
@memoized
def sidebar_items(self):
items = super(AccountingTab, self).sidebar_items
from corehq.apps.accounting.views import ManageAccountingAdminsView
items.append(('Permissions', (
{
'title': ManageAccountingAdminsView.page_title,
'url': reverse(ManageAccountingAdminsView.urlname),
},
)))
if toggles.INVOICE_TRIGGER.enabled(self.couch_user.username):
from corehq.apps.accounting.views import (
TriggerInvoiceView, TriggerBookkeeperEmailView,
TestRenewalEmailView,
)
items.append(('Other Actions', (
{
'title': TriggerInvoiceView.page_title,
'url': reverse(TriggerInvoiceView.urlname),
},
{
'title': TriggerBookkeeperEmailView.page_title,
'url': reverse(TriggerBookkeeperEmailView.urlname),
},
{
'title': TestRenewalEmailView.page_title,
'url': reverse(TestRenewalEmailView.urlname),
}
)))
return items
class SMSAdminTab(UITab):
title = ugettext_noop("SMS Connectivity & Billing")
view = "default_sms_admin_interface"
dispatcher = SMSAdminInterfaceDispatcher
@property
@memoized
def sidebar_items(self):
items = super(SMSAdminTab, self).sidebar_items
items.append((_('SMS Connectivity'), [
{'title': _('SMS Connections'),
'url': reverse('list_backends'),
'subpages': [
{'title': _('Add Connection'),
'urlname': 'add_backend'},
{'title': _('Edit Connection'),
'urlname': 'edit_backend'},
]},
{'title': _('SMS Country-Connection Map'),
'url': reverse('global_backend_map')},
]))
return items
@property
def is_viewable(self):
return self.couch_user and self.couch_user.is_superuser
class FeatureFlagsTab(UITab):
title = ugettext_noop("Feature Flags")
view = "toggle_list"
@property
def is_viewable(self):
return self.couch_user and self.couch_user.is_superuser
class AnnouncementsTab(UITab):
title = ugettext_noop("Announcements")
view = "corehq.apps.announcements.views.default_announcement"
dispatcher = HQAnnouncementAdminInterfaceDispatcher
@property
def is_viewable(self):
return self.couch_user and self.couch_user.is_superuser
class AdminTab(UITab):
title = ugettext_noop("Admin")
view = "corehq.apps.hqadmin.views.default"
subtab_classes = (
AdminReportsTab,
GlobalADMConfigTab,
SMSAdminTab,
AnnouncementsTab,
AccountingTab,
FeatureFlagsTab
)
@property
def dropdown_items(self):
if self.couch_user and not self.couch_user.is_superuser and (IS_DEVELOPER.enabled(self.couch_user.username)):
return [format_submenu_context(_("System Info"), url=reverse("system_info"))]
submenu_context = [
format_submenu_context(_("Reports"), is_header=True),
format_submenu_context(_("Admin Reports"), url=reverse("default_admin_report")),
format_submenu_context(_("System Info"), url=reverse("system_info")),
format_submenu_context(_("Management"), is_header=True),
format_submenu_context(mark_for_escaping(_("ADM Reports & Columns")),
url=reverse("default_adm_admin_interface")),
format_submenu_context(mark_for_escaping(_("Commands")), url=reverse("management_commands")),
# format_submenu_context(mark_for_escaping("HQ Announcements"),
# url=reverse("default_announcement_admin")),
]
try:
if AccountingTab(self._request, self._current_url_name).is_viewable:
submenu_context.append(format_submenu_context(AccountingTab.title, url=reverse('accounting_default')))
except Exception:
pass
try:
submenu_context.append(format_submenu_context(mark_for_escaping(_("Old SMS Billing")),
url=reverse("billing_default")))
except Exception:
pass
submenu_context.extend([
format_submenu_context(_("SMS Connectivity & Billing"), url=reverse("default_sms_admin_interface")),
format_submenu_context(_("Feature Flags"), url=reverse("toggle_list")),
format_submenu_context(None, is_divider=True),
format_submenu_context(_("Django Admin"), url="/admin")
])
return submenu_context
@property
def is_viewable(self):
return self.couch_user and (self.couch_user.is_superuser or IS_DEVELOPER.enabled(self.couch_user.username))
class ExchangeTab(UITab):
title = ugettext_noop("Exchange")
view = "corehq.apps.appstore.views.appstore"
@property
def dropdown_items(self):
submenu_context = None
if self.domain and self.couch_user.is_domain_admin(self.domain):
submenu_context = [
format_submenu_context(_("CommCare Exchange"), url=reverse("appstore")),
format_submenu_context(_("Publish this project"),
url=reverse("domain_snapshot_settings",
args=[self.domain]))
]
return submenu_context
@property
def is_viewable(self):
couch_user = self.couch_user
return (self.domain and couch_user and couch_user.can_edit_apps() and
(couch_user.is_member_of(self.domain) or couch_user.is_superuser))
class OrgTab(UITab):
@property
def is_viewable(self):
return self.org and self.couch_user and (self.couch_user.is_member_of_org(self.org) or self.couch_user.is_superuser)
class OrgReportTab(OrgTab):
title = ugettext_noop("Reports")
view = "corehq.apps.orgs.views.base_report"
@property
def dropdown_items(self):
return [
format_submenu_context(_("Projects Table"), url=reverse("orgs_report", args=(self.org.name,))),
format_submenu_context(_("Form Data"), url=reverse("orgs_stats", args=(self.org.name, "forms"))),
format_submenu_context(_("Case Data"), url=reverse("orgs_stats", args=(self.org.name, "cases"))),
format_submenu_context(_("User Data"), url=reverse("orgs_stats", args=(self.org.name, "users"))),
]
class OrgSettingsTab(OrgTab):
title = ugettext_noop("Settings")
view = "corehq.apps.orgs.views.orgs_landing"
@property
def dropdown_items(self):
return [
format_submenu_context(_("Projects"), url=reverse("orgs_landing", args=(self.org.name,))),
format_submenu_context(_("Teams"), url=reverse("orgs_teams", args=(self.org.name,))),
format_submenu_context(_("Members"), url=reverse("orgs_stats", args=(self.org.name,))),
]
avg_line_length: 36.682706 | max_line_length: 124 | alphanum_fraction: 0.552858
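A minimal sketch of how a new navigation tab could be defined on top of the `UITab` base class and the `format_submenu_context` helper in the file above; `ExampleToolsTab`, its view path, and its title are hypothetical and not part of commcare-hq:

```python
# Hypothetical tab definition; the view name below is a placeholder.
class ExampleToolsTab(UITab):
    title = ugettext_noop("Example Tools")
    view = "corehq.apps.example.views.default"

    @property
    def is_viewable(self):
        # Show the tab only to domain admins of the current project.
        return (self.domain and self.couch_user
                and self.couch_user.is_domain_admin(self.domain))

    @property
    def dropdown_items(self):
        return [
            format_submenu_context(_("Example Tools"), is_header=True),
            format_submenu_context(_("Overview"), url=self.url),
        ]
```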
hexsha: 21e22300ea1e8761d356bebad801cbe21c71eeae | size: 4,299 | ext: py | lang: Python
max_stars_repo: kokosing/hue (path desktop/core/ext-py/python-ldap-2.3.13/Doc/conf.py, head 2307f5379a35aae9be871e836432e6f45138b3d9, licenses ["Apache-2.0"]) | max_stars_count: 5,079 | stars events: 2015-01-01T03:39:46.000Z to 2022-03-31T07:38:22.000Z
max_issues_repo: zks888/hue (path desktop/core/ext-py/python-ldap-2.3.13/Doc/conf.py, head 93a8c370713e70b216c428caa2f75185ef809deb, licenses ["Apache-2.0"]) | max_issues_count: 1,623 | issues events: 2015-01-01T08:06:24.000Z to 2022-03-30T19:48:52.000Z
max_forks_repo: zks888/hue (path desktop/core/ext-py/python-ldap-2.3.13/Doc/conf.py, head 93a8c370713e70b216c428caa2f75185ef809deb, licenses ["Apache-2.0"]) | max_forks_count: 2,033 | forks events: 2015-01-04T07:18:02.000Z to 2022-03-28T19:55:47.000Z
content:
# -*- coding: utf-8 -*-
#
# python-ldap documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 29 15:08:17 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
# If your extensions are in another directory, add it here.
#sys.path.append('some/directory')
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'python-ldap'
copyright = '2008, python-ldap project team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '2.3'
# The full version, including alpha/beta/rc tags.
release = '2.3.12.0'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-ldap-doc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('index', 'python-ldap.tex', 'python-ldap Documentation',
'python-ldap project', 'manual')]
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
avg_line_length: 32.08209 | max_line_length: 83 | alphanum_fraction: 0.701559
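As a small illustration of how a Sphinx configuration like the one above is typically consumed, assuming Sphinx is installed and the working directory is the Doc/ folder containing conf.py, the HTML build can be driven from Python like this (the _build/html output path is a convention, not mandated by the config):

```python
# Minimal sketch: invoke sphinx-build on the directory holding conf.py.
import subprocess

subprocess.run(
    ["sphinx-build", "-b", "html", ".", "_build/html"],
    check=True,
)
```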
hexsha: 8158d3028459530aa21ebcb6248c60a91c922905 | size: 4,220 | ext: py | lang: Python
max_stars_repo: bjfranks/Classification-AD (path src/networks/imagenet_WideResNet.py, head 4eecd6648bb6b54662944921924c8960c2ca236c, licenses ["MIT"]) | max_stars_count: 27 | stars events: 2020-05-30T16:27:31.000Z to 2022-03-28T16:45:25.000Z
max_issues_repo: bjfranks/Classification-AD (path src/networks/imagenet_WideResNet.py, head 4eecd6648bb6b54662944921924c8960c2ca236c, licenses ["MIT"]) | max_issues_count: 3 | issues events: 2021-04-22T10:01:55.000Z to 2022-01-13T02:50:31.000Z
max_forks_repo: bjfranks/Classification-AD (path src/networks/imagenet_WideResNet.py, head 4eecd6648bb6b54662944921924c8960c2ca236c, licenses ["MIT"]) | max_forks_count: 7 | forks events: 2020-06-15T16:31:23.000Z to 2022-03-23T09:33:32.000Z
content:
import torch.nn as nn
from base.base_net import BaseNet
from networks.cbam import CBAM
from torch.nn import init
# Credits to: https://github.com/hendrycks/ss-ood
class ImageNet_WideResNet(BaseNet):
def __init__(self, rep_dim=256):
self.inplanes = 64
super().__init__()
self.rep_dim = rep_dim
att_type = 'CBAM'
layers = [2, 2, 2, 2]
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.avgpool = nn.AvgPool2d(7)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.bam1, self.bam2, self.bam3 = None, None, None
self.layer1 = self._make_layer(BasicBlock, 64, layers[0], att_type=att_type)
self.layer2 = self._make_layer(BasicBlock, 128, layers[1], stride=2, att_type=att_type)
self.layer3 = self._make_layer(BasicBlock, 256, layers[2], stride=2, att_type=att_type)
self.layer4 = self._make_layer(BasicBlock, 512, layers[3], stride=2, att_type=att_type)
self.fc = nn.Linear(512 * BasicBlock.expansion, self.rep_dim)
init.kaiming_normal_(self.fc.weight)
for key in self.state_dict():
if key.split('.')[-1] == "weight":
if "conv" in key:
init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
if "bn" in key:
if "SpatialGate" in key:
self.state_dict()[key][...] = 0
else:
self.state_dict()[key][...] = 1
elif key.split(".")[-1] == 'bias':
self.state_dict()[key][...] = 0
def _make_layer(self, block, planes, blocks, stride=1, att_type=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, use_cbam=att_type == 'CBAM'))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, use_cbam=att_type == 'CBAM'))
return nn.Sequential(*layers)
def forward(self, x):
x = x.view(-1, 3, 224, 224)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if not self.bam1 is None:
x = self.bam1(x)
x = self.layer2(x)
if not self.bam2 is None:
x = self.bam2(x)
x = self.layer3(x)
if not self.bam3 is None:
x = self.bam3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return self.fc(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, use_cbam=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
if use_cbam:
self.cbam = CBAM(planes, 16)
else:
self.cbam = None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
if not self.cbam is None:
out = self.cbam(out)
out += residual
out = self.relu(out)
return out
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
avg_line_length: 31.729323 | max_line_length: 100 | alphanum_fraction: 0.565877
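A quick smoke-test sketch for the network above, assuming the repository's `base.base_net.BaseNet` and `networks.cbam.CBAM` modules are importable so that `ImageNet_WideResNet` can be constructed; the batch size of two is arbitrary:

```python
# Minimal forward-pass check (illustrative only).
import torch

model = ImageNet_WideResNet(rep_dim=256)
model.eval()

with torch.no_grad():
    dummy = torch.randn(2, 3, 224, 224)  # two 224x224 RGB images
    features = model(dummy)

print(features.shape)  # expected: torch.Size([2, 256])
```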
hexsha: ee665819058bb9a8616cb39831b88d061db4d1a9 | size: 10,183 | ext: py | lang: Python
max_stars_repo: newskylabs/nsl-bookblock (path newskylabs/tools/bookblock/logic/page.py, head 77ff0b0ff7ad4f0487fb656d99819ed39b011c08, licenses ["Apache-2.0"]) | max_stars_count: null | stars events: null
max_issues_repo: newskylabs/nsl-bookblock (path newskylabs/tools/bookblock/logic/page.py, head 77ff0b0ff7ad4f0487fb656d99819ed39b011c08, licenses ["Apache-2.0"]) | max_issues_count: null | issues events: null
max_forks_repo: newskylabs/nsl-bookblock (path newskylabs/tools/bookblock/logic/page.py, head 77ff0b0ff7ad4f0487fb656d99819ed39b011c08, licenses ["Apache-2.0"]) | max_forks_count: null | forks events: null
content:
"""newskylabs/tools/bookblock/logic/page.py:
Page.
"""
__author__ = "Dietrich Bollmann"
__email__ = "[email protected]"
__copyright__ = "Copyright 2019 Dietrich Bollmann"
__license__ = "Apache License 2.0, http://www.apache.org/licenses/LICENSE-2.0"
__date__ = "2019/10/18"
import sys, re
from pathlib import Path, PosixPath
from kivy.logger import Logger
# Numpy
import numpy as np
# OpenCV
import cv2
## =========================================================
## parse_geometry(geometry)
## ---------------------------------------------------------
# Example: 600x800+22+41
g_regexp_geometry = re.compile('(\d+)x(\d+)\+(\d+)\+(\d+)')
def parse_geometry(geometry):
"""
Example: 600x800+22+41
"""
m = g_regexp_geometry.match(geometry)
if m:
width = int(m.group(1))
height = int(m.group(2))
offset_left = int(m.group(3))
offset_top = int(m.group(4))
return (width, height, offset_left, offset_top)
    else:
        Logger.debug("parse_geometry(): Malformed geometry string: '{}'".format(geometry))
        print("ERROR Malformed geometry: '{}'".format(geometry), file=sys.stderr)
        exit(-1)
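# Illustrative example (not exhaustive): parse_geometry("600x800+22+41")
# returns (600, 800, 22, 41), i.e. width=600, height=800, offset_left=22
# and offset_top=41.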
## =========================================================
## class Page:
## ---------------------------------------------------------
class Page:
"""
"""
def __init__(self, settings):
self._settings = settings
def get(self, page_spec):
# Get the view mode
view_mode = self._settings.get_view_mode()
Logger.debug("Page: View mode: {}".format(view_mode))
        # Depending on the view mode return one of:
        # - the scan with the bounding box drawn on it
        # - the page corresponding to the bounding box
        # - the raw scan as it is
if view_mode == 'scan':
page = self.get_scan_with_bounding_box(page_spec)
Logger.debug("Page: scan type(page): {}".format(type(page)))
elif view_mode == 'page':
page = self.get_page(page_spec)
Logger.debug("Page: page type(page): {}".format(type(page)))
else: # view_mode == 'raw':
page = self.get_scan_raw(page_spec)
Logger.debug("Page: raw type(page): {}".format(type(page)))
Logger.debug("Pages: type(page): {}".format(type(page)))
return page
def get_scan_raw(self, page_spec):
return page_spec['scan-path']
def load_scan(self, page_spec):
# Extract page info
scan = page_spec['scan']
# Define aliases for relevant settings
image_mode = self._settings.get_image_mode()
# Extract source and target file
scan_path = page_spec['scan-path']
# Print settings
msg = \
"DEBUG [Page] Loading Scanned page:\n" + \
"\n" + \
" - scan: {}\n".format(scan) + \
" - image_mode: {}\n".format(image_mode) + \
" - source file: {}\n".format(scan_path) + \
"\n"
Logger.debug(msg)
# Ensure that the scan file exists
if not Path(scan_path).exists():
# No file has been found
# print an ERROR and exit
print("ERROR File not found: {}".format(scan_path), file=sys.stderr)
sys.exit(2)
# Select image mode
if image_mode == 'color':
image_mode = cv2.IMREAD_COLOR
elif image_mode == 'grayscale':
image_mode = cv2.IMREAD_GRAYSCALE
else:
# ERROR:
# Undefined image mode
            # Only `color' and `grayscale' are defined.
            # - raise an error
            raise TypeError("Undefined image mode: {} "
                            "Only `color' and `grayscale' are defined."\
                            .format(image_mode)
            )
        # Load the scan using the selected image mode
scan_data = cv2.imread(scan_path, image_mode)
# Return the loaded scan data
return scan_data
def calculate_bounding_box(self, page_spec, scan_size):
"""
Calculate the Bounding Box
"""
# Extract page info
side = page_spec['side']
# Define aliases for relevant settings
geometry = self._settings.get_geometry()
# Print settings
msg = \
"DEBUG [Page] Calculating bounding box:\n" + \
"\n" + \
" - scan size: {}\n".format(scan_size) + \
" - side: {}\n".format(side) + \
" - geometry: {}\n".format(geometry) + \
"\n"
Logger.debug(msg)
# Size of scan
scan_height, scan_width = scan_size
half_scan_width = int(scan_width / 2)
# Size of page
# parse geometry string
bb_width, bb_height, bb_offset_left, bb_offset_top = parse_geometry(geometry)
# Bounding box
# Calculate upper left and lower right point
# defining the bounding box
# Default: left page
bb_p1 = (bb_offset_left, bb_offset_top)
bb_p2 = (bb_p1[0] + bb_width - 1, bb_p1[1] + bb_height - 1)
# When the right page is required
# shift the bounding box to the right page
if side == 'right':
bb_p1 = (half_scan_width + bb_p1[0], bb_p1[1])
bb_p2 = (half_scan_width + bb_p2[0], bb_p2[1])
        # Return bounding box
return (bb_p1, bb_p2)
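    # Worked example (illustrative, assuming geometry "600x800+22+41" and a
    # 2000px wide scan): the left page gets bb_p1=(22, 41) and bb_p2=(621, 840);
    # for the right page both points are shifted by half_scan_width=1000,
    # giving bb_p1=(1022, 41) and bb_p2=(1621, 840).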
    def get_scan_with_bounding_box(self, page_spec):
# Extract page info
scan = page_spec['scan']
side = page_spec['side']
page = page_spec['page']
# Extract source and target file
scan_path = page_spec['scan-path']
# Define aliases for relevant settings
geometry = self._settings.get_geometry()
# Print settings
msg = \
"DEBUG [Page] Draw bounding box on scanned page:\n" + \
"\n" + \
" - scan: {}\n".format(scan) + \
" - side: {}\n".format(side) + \
" - page: {}\n".format(page) + \
" - source file: {}\n".format(scan_path) + \
" - geometry: {}\n".format(geometry) + \
"\n"
Logger.debug(msg)
# Load the scan
scan = self.load_scan(page_spec)
        # When the scan has not been found return None
if not isinstance(scan, (str, np.ndarray)):
return None
# Get the size of the scan
scan_size = scan.shape[:2]
# Calculate the Bounding Box
bb_p1, bb_p2 = self.calculate_bounding_box(page_spec, scan_size)
# Bounding box settings
bb_color = 0 # Black
bb_line_width = 2 # 2px line thickness
# Draw bounding box
cv2.rectangle(scan, bb_p1, bb_p2, bb_color, bb_line_width)
# Return scan with bounding box
return scan
def get_page(self, page_spec):
# Extract page info
scan = page_spec['scan']
side = page_spec['side']
page = page_spec['page']
# Extract source and target file
scan_path = page_spec['scan-path']
# Define aliases for relevant settings
geometry = self._settings.get_geometry()
# Print settings
msg = \
"DEBUG [Pages] Draw bounding box on scanned page:\n" + \
"\n" + \
" - scan: {}\n".format(scan) + \
" - side: {}\n".format(side) + \
" - page: {}\n".format(page) + \
" - source file: {}\n".format(scan_path) + \
" - geometry: {}\n".format(geometry) + \
"\n"
Logger.debug(msg)
# Load the scan
scan = self.load_scan(page_spec)
# When the scan has not been found return False
if not isinstance(scan, (str, np.ndarray)):
return False
# Get the size of the scan
scan_size = scan.shape[:2]
# Calculate the Bounding Box
bb_p1, bb_p2 = self.calculate_bounding_box(page_spec, scan_size)
# Calculate the page area
x1, y1 = bb_p1
x2, y2 = bb_p2
x = x1
y = y1
w = x2 - x1 + 1
h = y2 - y1 + 1
# Cut out page
Logger.debug("Page: Cutting out area: x: {}, y: {}, w: {}, h: {}".format(x, y, w, h))
page = scan.copy()
page = page[y:y+h, x:x+w]
# Return the page data
return page
def store_page(self, page_spec):
# Extract page info
page = page_spec['page']
scan = page_spec['scan']
side = page_spec['side']
# Define aliases for relevant settings
geometry = self._settings.get_geometry()
# Extract source and target file
page_path = page_spec['page-path']
# Print settings
msg = \
"DEBUG [Page] Store page:\n" + \
"\n" + \
" - page: {}\n".format(page) + \
" - scan: {}\n".format(scan) + \
" - side: {}\n".format(side) + \
" - geometry: {}\n".format(geometry) + \
" - target file: {}\n".format(page_path) + \
"\n"
Logger.debug(msg)
page = self.get_page(page_spec)
# When the page has not been found return False
if not isinstance(page, (str, np.ndarray)):
return False
# Ensure that the page directory exists
page_dir = PosixPath(page_path).parent
if not page_dir.exists():
Logger.debug("Pages: Creating page directory: {}".format(str(page_dir)))
page_dir.mkdir(parents=True, exist_ok=True)
# Save image
Logger.debug("Pages: Storing image: {}".format(page_path))
cv2.imwrite(page_path, page)
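# Minimal usage sketch (illustrative; `settings` is assumed to provide the
# get_view_mode(), get_image_mode() and get_geometry() accessors used above):
#
#   spec = {'scan': 1, 'side': 'left', 'page': 1,
#           'scan-path': 'scans/scan001.png',
#           'page-path': 'pages/page001.png'}
#   page = Page(settings)
#   img = page.get(spec)        # scan, page or raw view depending on view mode
#   page.store_page(spec)       # cut the page out and write it to 'page-path'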
## =========================================================
## =========================================================
## fin.
| 30.216617 | 93 | 0.513306 |
1e3afe0b46a3037aa60bbad4115e947d217098e0 | 1,738 | py | Python | examples/system/ulp/example_test.py | MagicInstall/esp-idf-NathanReeves-pio | b5efeeb5c00dff3f71f59db8849d399787f91c72 | ["Apache-2.0"] | 8 | 2021-04-17T23:30:54.000Z | 2022-02-12T19:10:34.000Z | examples/system/ulp/example_test.py | MagicInstall/esp-idf-NathanReeves-pio | b5efeeb5c00dff3f71f59db8849d399787f91c72 | ["Apache-2.0"] | null | null | null | examples/system/ulp/example_test.py | MagicInstall/esp-idf-NathanReeves-pio | b5efeeb5c00dff3f71f59db8849d399787f91c72 | ["Apache-2.0"] | 3 | 2021-08-02T07:11:55.000Z | 2021-11-09T06:02:05.000Z |
from __future__ import unicode_literals
import re
import time
import ttfw_idf
from tiny_test_fw import Utility
@ttfw_idf.idf_example_test(env_tag='Example_GENERIC', target=['esp32'])
def test_examples_ulp(env, extra_data):
dut = env.get_dut('ulp', 'examples/system/ulp')
dut.start_app()
dut.expect_all('Not ULP wakeup, initializing ULP',
'Entering deep sleep',
timeout=30)
def generate_gpio0_events():
for _ in range(5):
dut.port_inst.setDTR(True) # Pulling GPIO0 low using DTR
time.sleep(0.25)
dut.port_inst.setDTR(False)
time.sleep(0.25)
nvs_value = None
for _ in range(5):
generate_gpio0_events()
dut.expect('ULP wakeup, saving pulse count', timeout=5)
Utility.console_log('Woke up...')
init_count = int(dut.expect(re.compile(r'Read pulse count from NVS:\s+(\d+)'), timeout=5)[0], 10)
assert nvs_value in (init_count, None), ('Read count is {} and previously written value is {}'
''.format(init_count, nvs_value))
inc = int(dut.expect(re.compile(r'Pulse count from ULP:\s+(\d+)'), timeout=5)[0], 10)
assert inc in (5, 6), 'pulse count is {}'.format(inc)
new_count = int(dut.expect(re.compile(r'Wrote updated pulse count to NVS:\s+(\d+)'), timeout=5)[0], 10)
assert init_count + inc == new_count, '{} + {} != {}'.format(init_count, inc, new_count)
nvs_value = new_count
Utility.console_log('Pulse count written to NVS: {}. Entering deep sleep...'.format(nvs_value))
dut.expect('Entering deep sleep', timeout=5)
if __name__ == '__main__':
test_examples_ulp()
| 37.782609 | 111 | 0.617376 |
b7efe5c3b4e0c60c0a27031651fd7fa59ab21345 | 64,306 | py | Python | rst2pdf/createpdf.py | oz123/rst2pdf | 9979c04a2cc88dce9aaf8312a6031796ead4943d | ["MIT"] | null | null | null | rst2pdf/createpdf.py | oz123/rst2pdf | 9979c04a2cc88dce9aaf8312a6031796ead4943d | ["MIT"] | null | null | null | rst2pdf/createpdf.py | oz123/rst2pdf | 9979c04a2cc88dce9aaf8312a6031796ead4943d | ["MIT"] | 1 | 2019-12-09T13:14:20.000Z | 2019-12-09T13:14:20.000Z |
# -*- coding: utf-8 -*-
#$URL$
#$Date$
#$Revision$
# See LICENSE.txt for licensing terms
# Some fragments of code are copied from Reportlab under this license:
#
#####################################################################################
#
# Copyright (c) 2000-2008, ReportLab Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the company nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE OFFICERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#####################################################################################
__docformat__ = 'reStructuredText'
# Import Psyco if available
from opt_imports import psyco
psyco.full()
import sys
import os
import tempfile
import re
import string
import config
import logging
from cStringIO import StringIO
from os.path import abspath, dirname, expanduser, join
from urlparse import urljoin, urlparse, urlunparse
from copy import copy, deepcopy
from optparse import OptionParser
from pprint import pprint
from xml.sax.saxutils import unescape, escape
import docutils.readers.doctree
import docutils.core
import docutils.nodes
from docutils.parsers.rst import directives
from docutils.readers import standalone
from docutils.transforms import Transform
try:
from roman import toRoman
except ImportError:
from docutils.utils.roman import toRoman
from reportlab.platypus import *
from reportlab.platypus.doctemplate import IndexingFlowable
from reportlab.platypus.flowables import _listWrapOn, _Container
from reportlab.pdfbase.pdfdoc import PDFPageLabel
#from reportlab.lib.enums import *
#from reportlab.lib.units import *
#from reportlab.lib.pagesizes import *
from rst2pdf import counter_role, oddeven_directive
from rst2pdf import pygments_code_block_directive # code-block directive
from rst2pdf import flowables
from rst2pdf.flowables import * # our own reportlab flowables
from rst2pdf.sinker import Sinker
from rst2pdf.image import MyImage, missing
from rst2pdf.aafigure_directive import Aanode
from rst2pdf.log import log, nodeid
from rst2pdf.smartypants import smartyPants
from rst2pdf import styles as sty
from rst2pdf.nodehandlers import nodehandlers
from rst2pdf.languages import get_language_available
from rst2pdf.opt_imports import Paragraph, BaseHyphenator, PyHyphenHyphenator, \
DCWHyphenator, sphinx as sphinx_module, wordaxe
# Small template engine for covers
# The obvious import doesn't work for complicated reasons ;-)
from rst2pdf import tenjin
to_str = tenjin.helpers.generate_tostrfunc('utf-8')
escape = tenjin.helpers.escape
templateEngine = tenjin.Engine()
def renderTemplate(tname, **context):
context['to_str'] = to_str
context['escape'] = escape
return templateEngine.render(tname, context)
#def escape (x,y):
# "Dummy escape function to test for excessive escaping"
# return x
numberingstyles={ 'arabic': 'ARABIC',
'roman': 'ROMAN_UPPER',
'lowerroman': 'ROMAN_LOWER',
'alpha': 'LETTERS_UPPER',
'loweralpha': 'LETTERS_LOWER' }
class RstToPdf(object):
def __init__(self, stylesheets=[],
language='en_US',
header=None,
footer=None,
inlinelinks=False,
breaklevel=1,
font_path=[],
style_path=[],
fit_mode='shrink',
background_fit_mode='center',
sphinx=False,
smarty='0',
baseurl=None,
repeat_table_rows=False,
footnote_backlinks=True,
inline_footnotes=False,
real_footnotes=False,
def_dpi=300,
show_frame=False,
highlightlang='python', # this one is only used by Sphinx
basedir=os.getcwd(),
splittables=False,
blank_first_page=False,
first_page_on_right=False,
breakside='odd',
custom_cover='cover.tmpl',
floating_images=False,
numbered_links=False,
section_header_depth=2,
raw_html=False,
strip_elements_with_classes=[]
):
self.debugLinesPdf=False
self.depth=0
self.breakside=breakside
self.first_page_on_right=first_page_on_right
self.blank_first_page=blank_first_page
self.splittables=splittables
self.basedir=basedir
self.language, self.docutils_language = get_language_available(
language)[:2]
self.doc_title = ""
self.doc_title_clean = ""
self.doc_subtitle = ""
self.doc_author = ""
self.header = header
self.footer = footer
self.custom_cover=custom_cover
self.floating_images=floating_images
self.decoration = {'header': header,
'footer': footer,
'endnotes': [],
'extraflowables':[]}
# find base path
if hasattr(sys, 'frozen'):
self.PATH = abspath(dirname(sys.executable))
else:
self.PATH = abspath(dirname(__file__))
self.font_path=font_path
self.style_path=style_path
self.def_dpi=def_dpi
self.loadStyles(stylesheets)
self.docutils_languages = {}
self.inlinelinks = inlinelinks
self.breaklevel = breaklevel
self.fit_mode = fit_mode
self.background_fit_mode = background_fit_mode
self.to_unlink = []
self.smarty = smarty
self.baseurl = baseurl
self.repeat_table_rows = repeat_table_rows
self.footnote_backlinks = footnote_backlinks
self.inline_footnotes = inline_footnotes
self.real_footnotes = real_footnotes
# Real footnotes are always a two-pass thing.
if self.real_footnotes:
self.mustMultiBuild = True
self.def_dpi = def_dpi
self.show_frame = show_frame
self.numbered_links = numbered_links
self.section_header_depth = section_header_depth
self.img_dir = os.path.join(self.PATH, 'images')
self.raw_html = raw_html
self.strip_elements_with_classes = strip_elements_with_classes
# Sorry about this, but importing sphinx.roles makes some
# ordinary documents fail (demo.txt specifically) so
        # I can't just try to import it outside. I need
# to do it only if it's requested
if sphinx and sphinx_module:
import sphinx.roles
from sphinxnodes import sphinxhandlers
self.highlightlang = highlightlang
self.gen_pdftext, self.gen_elements = sphinxhandlers(self)
else:
# These rst2pdf extensions conflict with sphinx
directives.register_directive('code-block', pygments_code_block_directive.code_block_directive)
directives.register_directive('code', pygments_code_block_directive.code_block_directive)
import math_directive
self.gen_pdftext, self.gen_elements = nodehandlers(self)
self.sphinx = sphinx
if not self.styles.languages:
self.styles.languages=[]
if self.language:
self.styles.languages.append(self.language)
self.styles['bodytext'].language = self.language
else:
self.styles.languages.append('en_US')
self.styles['bodytext'].language = 'en_US'
# Load the docutils language modules for all required languages
for lang in self.styles.languages:
self.docutils_languages[lang] = get_language_available(lang)[2]
# Load the hyphenators for all required languages
if wordaxe is not None:
for lang in self.styles.languages:
if lang.split('_', 1)[0] == 'de':
try:
wordaxe.hyphRegistry[lang] = DCWHyphenator('de', 5)
continue
except Exception:
# hyphenators may not always be available or crash,
# e.g. wordaxe issue 2809074 (http://is.gd/16lqs)
log.warning("Can't load wordaxe DCW hyphenator"
" for German language, trying Py hyphenator instead")
else:
continue
try:
wordaxe.hyphRegistry[lang] = PyHyphenHyphenator(lang)
except Exception:
log.warning("Can't load wordaxe Py hyphenator"
" for language %s, trying base hyphenator", lang)
else:
continue
try:
wordaxe.hyphRegistry[lang] = BaseHyphenator(lang)
except Exception:
log.warning("Can't even load wordaxe base hyphenator")
log.info('hyphenation by default in %s , loaded %s',
self.styles['bodytext'].language,
','.join(self.styles.languages))
self.pending_targets=[]
self.targets=[]
def loadStyles(self, styleSheets=None ):
if styleSheets is None:
styleSheets=[]
self.styles = sty.StyleSheet(styleSheets,
self.font_path,
self.style_path,
def_dpi=self.def_dpi)
def style_language(self, style):
"""Return language corresponding to this style."""
try:
return style.language
except AttributeError:
pass
try:
return self.styles['bodytext'].language
except AttributeError:
# FIXME: this is pretty arbitrary, and will
# probably not do what you want.
# however, it should only happen if:
# * You specified the language of a style
# * Have no wordaxe installed.
# Since it only affects hyphenation, and wordaxe is
            # not installed, it should have no effect whatsoever
return os.environ['LANG'] or 'en'
def text_for_label(self, label, style):
"""Translate text for label."""
try:
text = self.docutils_languages[
self.style_language(style)].labels[label]
except KeyError:
text = label.capitalize()
return text
def text_for_bib_field(self, field, style):
"""Translate text for bibliographic fields."""
try:
text = self.docutils_languages[
self.style_language(style)].bibliographic_fields[field]
except KeyError:
text = field
return text + ":"
def author_separator(self, style):
"""Return separator string for authors."""
try:
sep = self.docutils_languages[
self.style_language(style)].author_separators[0]
except KeyError:
sep = ';'
return sep + " "
def styleToTags(self, style):
'''Takes a style name, returns a pair of opening/closing tags for it, like
"<font face=helvetica size=14 color=red>". Used for inline
nodes (custom interpreted roles)'''
try:
s = self.styles[style]
r1=['<font face="%s" color="#%s" ' %
(s.fontName, s.textColor.hexval()[2:])]
bc = s.backColor
if bc:
r1.append('backColor="#%s"' % bc.hexval()[2:])
if s.trueFontSize:
r1.append('size="%d"'%s.fontSize)
r1.append('>')
r2=['</font>']
if s.strike:
r1.append('<strike>')
r2.insert(0,'</strike>')
if s.underline:
r1.append('<u>')
r2.insert(0,'</u>')
return [''.join(r1), ''.join(r2)]
except KeyError:
log.warning('Unknown class %s', style)
return None
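    # Illustrative example (assumed style values): for an underlined 12pt
    # Helvetica style with red text this returns a pair roughly like
    # ['<font face="Helvetica" color="#ff0000" size="12"><u>', '</u></font>'],
    # used to wrap the inline text of the corresponding custom role.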
def styleToFont(self, style):
'''Takes a style name, returns a font tag for it, like
"<font face=helvetica size=14 color=red>". Used for inline
nodes (custom interpreted roles)'''
try:
s = self.styles[style]
r=['<font face="%s" color="#%s" ' %
(s.fontName, s.textColor.hexval()[2:])]
bc = s.backColor
if bc:
r.append('backColor="#%s"' % bc.hexval()[2:])
if s.trueFontSize:
r.append('size="%d"'%s.fontSize)
r.append('>')
return ''.join(r)
except KeyError:
log.warning('Unknown class %s', style)
return None
def gather_pdftext(self, node, replaceEnt=True):
return ''.join([self.gen_pdftext(n, replaceEnt)
for n in node.children])
def gather_elements(self, node, style=None):
if style is None:
style = self.styles.styleForNode(node)
r = []
if 'float' in style.__dict__:
style = None # Don't pass floating styles to children!
for n in node.children:
# import pdb; pdb.set_trace()
r.extend(self.gen_elements(n, style=style))
return r
def bullet_for_node(self, node):
"""Takes a node, assumes it's some sort of
item whose parent is a list, and
returns the bullet text it should have"""
b = ""
t = 'item'
if node.parent.get('start'):
start = int(node.parent.get('start'))
else:
start = 1
if node.parent.get('bullet') or isinstance(
node.parent, docutils.nodes.bullet_list):
b = node.parent.get('bullet', '*')
if b == "None":
b = ""
t = 'bullet'
elif node.parent.get('enumtype') == 'arabic':
b = str(node.parent.children.index(node) + start) + '.'
elif node.parent.get('enumtype') == 'lowerroman':
b = toRoman(node.parent.children.index(node) + start).lower() + '.'
elif node.parent.get('enumtype') == 'upperroman':
b = toRoman(node.parent.children.index(node) + start).upper() + '.'
elif node.parent.get('enumtype') == 'loweralpha':
b = string.lowercase[node.parent.children.index(node)
+ start - 1] + '.'
elif node.parent.get('enumtype') == 'upperalpha':
b = string.uppercase[node.parent.children.index(node)
+ start - 1] + '.'
else:
log.critical("Unknown kind of list_item %s [%s]",
node.parent, nodeid(node))
return b, t
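    # Illustrative examples (assumption): the third child of an enumerated list
    # with enumtype='lowerroman' and start=1 yields ('iii.', 'item'), while an
    # item of a plain bullet_list yields ('*', 'bullet').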
def filltable(self, rows):
"""
Takes a list of rows, consisting of cells and performs the following fixes:
* For multicolumn cells, add continuation cells, to make all rows the same
size. These cells have to be multirow if the original cell is multirow.
* For multirow cell, insert continuation cells, to make all columns the
same size.
* If there are still shorter rows, add empty cells at the end (ReST quirk)
* Once the table is *normalized*, create spans list, fitting for reportlab's
Table class.
"""
# If there is a multicol cell, we need to insert Continuation Cells
# to make all rows the same length
#from pudb import set_trace; set_trace()
for y in range(0, len(rows)):
for x in range(len(rows[y])-1, -1, -1):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morecols"):
for i in range(0, cell.get("morecols")):
e=docutils.nodes.entry("")
e["morerows"] = cell.get("morerows",0)
rows[y].insert(x + 1, e)
for y in range(0, len(rows)):
for x in range(0, len(rows[y])):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morerows"):
for i in range(0, cell.get("morerows")):
rows[y + i + 1].insert(x, "")
# If a row is shorter, add empty cells at the right end
maxw = max([len(r) for r in rows])
for r in rows:
while len(r) < maxw:
r.append("")
# Create spans list for reportlab's table style
spans = []
for y in range(0, len(rows)):
for x in range(0, len(rows[y])):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morecols"):
mc = cell.get("morecols")
else:
mc = 0
if cell.get("morerows"):
mr = cell.get("morerows")
else:
mr = 0
if mc or mr:
spans.append(('SPAN', (x, y), (x + mc, y + mr)))
return spans
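    # Illustrative example (assumption): for a two-row table whose top-left
    # cell has morecols=1, a continuation cell is inserted next to it, every
    # row is padded to the same length, and the returned commands are
    # [('SPAN', (0, 0), (1, 0))] in the form expected by reportlab's TableStyle.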
def PreformattedFit(self, text, style):
"""Preformatted section that gets horizontally compressed if needed."""
# Pass a ridiculous size, then it will shrink to what's available
# in the frame
return BoundByWidth(2000*cm,
content=[XXPreformatted(text, style)],
mode=self.fit_mode, style=style)
def createPdf(self, text=None,
source_path=None,
output=None,
doctree=None,
compressed=False,
# This adds entries to the PDF TOC
# matching the rst source lines
debugLinesPdf=False):
"""Create a PDF from text (ReST input),
or doctree (docutil nodes) and save it in outfile.
If outfile is a string, it's a filename.
If it's something with a write method, (like a StringIO,
or a file object), the data is saved there.
"""
self.decoration = {'header': self.header,
'footer': self.footer,
'endnotes': [],
'extraflowables': []}
self.pending_targets=[]
self.targets=[]
self.debugLinesPdf = debugLinesPdf
if doctree is None:
if text is not None:
if self.language:
settings_overrides={'language_code': self.docutils_language}
else:
settings_overrides={}
settings_overrides['strip_elements_with_classes']=self.strip_elements_with_classes
self.doctree = docutils.core.publish_doctree(text,
source_path=source_path,
settings_overrides=settings_overrides)
#import pdb; pdb.set_trace()
log.debug(self.doctree)
else:
log.error('Error: createPdf needs a text or a doctree')
return
else:
self.doctree = doctree
if self.numbered_links:
# Transform all links to sections so they show numbers
from sectnumlinks import SectNumFolder, SectRefExpander
snf = SectNumFolder(self.doctree)
self.doctree.walk(snf)
srf = SectRefExpander(self.doctree, snf.sectnums)
self.doctree.walk(srf)
if self.strip_elements_with_classes:
from docutils.transforms.universal import StripClassesAndElements
sce = StripClassesAndElements(self.doctree)
sce.apply()
elements = self.gen_elements(self.doctree)
# Find cover template, save it in cover_file
def find_cover(name):
cover_path=[self.basedir, os.path.expanduser('~/.rst2pdf'),
os.path.join(self.PATH,'templates')]
cover_file=None
for d in cover_path:
if os.path.exists(os.path.join(d,name)):
cover_file=os.path.join(d,name)
break
return cover_file
cover_file=find_cover(self.custom_cover)
if cover_file is None:
log.error("Can't find cover template %s, using default"%self.custom_cover)
cover_file=find_cover('cover.tmpl')
# Feed data to the template, get restructured text.
cover_text = renderTemplate(tname=cover_file,
title=self.doc_title,
subtitle=self.doc_subtitle
)
# This crashes sphinx because .. class:: in sphinx is
# something else. Ergo, pdfbuilder does it in its own way.
if not self.sphinx:
elements = self.gen_elements(
publish_secondary_doctree(cover_text, self.doctree, source_path)) + elements
if self.blank_first_page:
elements.insert(0,PageBreak())
# Put the endnotes at the end ;-)
endnotes = self.decoration['endnotes']
if endnotes:
elements.append(MySpacer(1, 2*cm))
elements.append(Separation())
for n in self.decoration['endnotes']:
t_style = TableStyle(self.styles['endnote'].commands)
colWidths = self.styles['endnote'].colWidths
elements.append(DelayedTable([[n[0], n[1]]],
style=t_style, colWidths=colWidths))
if self.floating_images:
#from pudb import set_trace; set_trace()
# Handle images with alignment more like in HTML
new_elem=[]
for i,e in enumerate(elements[::-1]):
if (isinstance (e, MyImage) and e.image.hAlign != 'CENTER'
and new_elem):
# This is an image where flowables should wrap
# around it
popped=new_elem.pop()
new_elem.append(ImageAndFlowables(e,popped,
imageSide=e.image.hAlign.lower()))
else:
new_elem.append(e)
elements = new_elem
elements.reverse()
head = self.decoration['header']
foot = self.decoration['footer']
# So, now, create the FancyPage with the right sizes and elements
FP = FancyPage("fancypage", head, foot, self)
def cleantags(s):
            return re.sub(r'<[^>]*?>', '',
                unicode(s).strip())
pdfdoc = FancyDocTemplate(
output,
pageTemplates=[FP],
showBoundary=0,
pagesize=self.styles.ps,
title=self.doc_title_clean,
author=self.doc_author,
pageCompression=compressed)
pdfdoc.client =self
if getattr(self, 'mustMultiBuild', False):
# Force a multibuild pass
if not isinstance(elements[-1],UnhappyOnce):
log.info ('Forcing second pass so Total pages work')
elements.append(UnhappyOnce())
while True:
try:
log.info("Starting build")
# See if this *must* be multipass
pdfdoc.multiBuild(elements)
# Force a multibuild pass
# FIXME: since mustMultiBuild is set by the
# first pass in the case of ###Total###, then we
                    # make a new forced two-pass build. This is conceptually
                    # broken.
if getattr(self, 'mustMultiBuild', False):
# Force a multibuild pass
if not isinstance(elements[-1],UnhappyOnce):
log.info ('Forcing second pass so Total pages work')
elements.append(UnhappyOnce())
continue
## Rearrange footnotes if needed
if self.real_footnotes:
newStory=[]
fnPile=[]
for e in elements:
if getattr(e,'isFootnote',False):
# Add it to the pile
#if not isinstance (e, MySpacer):
fnPile.append(e)
elif getattr(e, '_atTop', False) or isinstance(
e, (UnhappyOnce, MyPageBreak)):
if fnPile:
fnPile.insert(0, Separation())
newStory.append(Sinker(fnPile))
newStory.append(e)
fnPile=[]
else:
newStory.append(e)
elements = newStory+fnPile
for e in elements:
if hasattr(e, '_postponed'):
delattr(e,'_postponed')
self.real_footnotes = False
continue
break
except ValueError as v:
# FIXME: cross-document links come through here, which means
# an extra pass per cross-document reference. Which sucks.
#if v.args and str(v.args[0]).startswith('format not resolved'):
#missing=str(v.args[0]).split(' ')[-1]
#log.error('Adding missing reference to %s and rebuilding. This is slow!'%missing)
#elements.append(Reference(missing))
#for e in elements:
#if hasattr(e,'_postponed'):
#delattr(e,'_postponed')
#else:
#raise
raise
#doc = SimpleDocTemplate("phello.pdf")
#doc.build(elements)
for fn in self.to_unlink:
try:
os.unlink(fn)
except OSError:
pass
from reportlab.platypus import doctemplate
class FancyDocTemplate(BaseDocTemplate):
def afterFlowable(self, flowable):
if isinstance(flowable, Heading):
# Notify TOC entry for headings/abstracts/dedications.
level, text = flowable.level, flowable.text
parent_id = flowable.parent_id
node = flowable.node
pagenum = setPageCounter()
self.notify('TOCEntry', (level, text, pagenum, parent_id, node))
def handle_flowable(self,flowables):
'''try to handle one flowable from the front of list flowables.'''
# this method is copied from reportlab
#allow document a chance to look at, modify or ignore
#the object(s) about to be processed
self.filterFlowables(flowables)
self.handle_breakBefore(flowables)
self.handle_keepWithNext(flowables)
f = flowables[0]
del flowables[0]
if f is None:
return
if isinstance(f,PageBreak):
if isinstance(f,SlowPageBreak):
self.handle_pageBreak(slow=1)
else:
self.handle_pageBreak()
self.afterFlowable(f)
elif isinstance(f,ActionFlowable):
f.apply(self)
self.afterFlowable(f)
else:
frame = self.frame
canv = self.canv
#try to fit it then draw it
if frame.add(f, canv, trySplit=self.allowSplitting):
if not isinstance(f,FrameActionFlowable):
self._curPageFlowableCount += 1
self.afterFlowable(f)
doctemplate._addGeneratedContent(flowables,frame)
else:
if self.allowSplitting:
# see if this is a splittable thing
S = frame.split(f,canv)
n = len(S)
else:
n = 0
if n:
if not isinstance(S[0],(PageBreak,SlowPageBreak,ActionFlowable)):
if frame.add(S[0], canv, trySplit=0):
self._curPageFlowableCount += 1
self.afterFlowable(S[0])
doctemplate._addGeneratedContent(flowables,frame)
else:
ident = "Splitting error(n==%d) on page %d in\n%s" % (
n, self.page, self._fIdent(f, 60, frame))
#leave to keep apart from the raise
raise LayoutError(ident)
del S[0]
for i,f in enumerate(S):
flowables.insert(i,f) # put split flowables back on the list
else:
if hasattr(f,'_postponed') and f._postponed > 4:
ident = "Flowable %s%s too large on page %d in frame %r%s of template %r" % (
self._fIdent(f, 60, frame), doctemplate._fSizeString(f),self.page,
self.frame.id, self.frame._aSpaceString(), self.pageTemplate.id)
#leave to keep apart from the raise
raise LayoutError(ident)
# this ought to be cleared when they are finally drawn!
f._postponed = 1
mbe = getattr(self, '_multiBuildEdits', None)
if mbe:
mbe((delattr, f, '_postponed'))
flowables.insert(0, f) # put the flowable back
self.handle_frameEnd()
_counter=0
_counterStyle='arabic'
class PageCounter(Flowable):
def __init__(self, number=0, style='arabic'):
self.style=str(style).lower()
self.number=int(number)
Flowable.__init__(self)
def wrap(self, availWidth, availHeight):
global _counter, _counterStyle
_counterStyle=self.style
_counter=self.number
return (self.width, self.height)
def drawOn(self, canvas, x, y, _sW):
pass
flowables.PageCounter = PageCounter
def setPageCounter(counter=None, style=None):
global _counter, _counterStyle
if counter is not None:
_counter = counter
if style is not None:
_counterStyle = style
if _counterStyle=='lowerroman':
ptext=toRoman(_counter).lower()
elif _counterStyle=='roman':
ptext=toRoman(_counter).upper()
elif _counterStyle=='alpha':
ptext=string.uppercase[_counter%26]
elif _counterStyle=='loweralpha':
ptext=string.lowercase[_counter%26]
else:
ptext=unicode(_counter)
return ptext
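# Illustrative example (assumption): setPageCounter(4, 'lowerroman') stores the
# counter and style and returns u'iv'; a later setPageCounter() call with no
# arguments formats the currently stored counter in the stored style.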
class MyContainer(_Container, Flowable):
pass
class UnhappyOnce(IndexingFlowable):
'''An indexing flowable that is only unsatisfied once.
If added to a story, it will make multiBuild run
at least two passes. Useful for ###Total###'''
_unhappy=True
def isSatisfied(self):
if self._unhappy:
self._unhappy= False
return False
return True
def draw(self):
pass
class HeaderOrFooter(object):
""" A helper object for FancyPage (below)
HeaderOrFooter handles operations which are common
to both headers and footers
"""
def __init__(self, items=None, isfooter=False, client=None):
self.items = items
if isfooter:
locinfo = 'footer showFooter defaultFooter footerSeparator'
else:
locinfo = 'header showHeader defaultHeader headerSeparator'
self.isfooter = isfooter
self.loc, self.showloc, self.defaultloc, self.addsep = locinfo.split()
self.totalpages = 0
self.client = client
def prepare(self, pageobj, canv, doc):
showloc = pageobj.template.get(self.showloc, True)
height = 0
items = self.items
if showloc:
if not items:
items = pageobj.template.get(self.defaultloc)
if items:
items = self.client.gen_elements(publish_secondary_doctree(items, self.client.doctree, None))
if items:
if isinstance(items, list):
items = items[:]
else:
items = [Paragraph(items, pageobj.styles[self.loc])]
addsep = pageobj.template.get(self.addsep, False)
if addsep:
if self.isfooter:
items.insert(0, Separation())
else:
items.append(Separation())
_, height = _listWrapOn(items, pageobj.tw, canv)
self.prepared = height and items
return height
def replaceTokens(self, elems, canv, doc, smarty):
"""Put doc_title/page number/etc in text of header/footer."""
# Make sure page counter is up to date
pnum=setPageCounter()
def replace(text):
if not isinstance(text, unicode):
try:
text = unicode(text, e.encoding)
except AttributeError:
text = unicode(text, 'utf-8')
except TypeError:
text = unicode(text, 'utf-8')
text = text.replace(u'###Page###', pnum)
if '###Total###' in text:
text = text.replace(u'###Total###', str(self.totalpages))
self.client.mustMultiBuild=True
text = text.replace(u"###Title###", doc.title)
text = text.replace(u"###Section###",
getattr(canv, 'sectName', ''))
text = text.replace(u"###SectNum###",
getattr(canv, 'sectNum', ''))
text = smartyPants(text, smarty)
return text
for i,e in enumerate(elems):
# TODO: implement a search/replace for arbitrary things
if isinstance(e, Paragraph):
text = replace(e.text)
elems[i] = Paragraph(text, e.style)
elif isinstance(e, DelayedTable):
data=deepcopy(e.data)
for r,row in enumerate(data):
for c,cell in enumerate(row):
if isinstance (cell, list):
data[r][c]=self.replaceTokens(cell, canv, doc, smarty)
else:
                            row[c]=self.replaceTokens([cell,], canv, doc, smarty)[0]
elems[i]=DelayedTable(data, e._colWidths, e.style)
elif isinstance(e, BoundByWidth):
for index, item in enumerate(e.content):
if isinstance(item, Paragraph):
e.content[index] = Paragraph(replace(item.text), item.style)
elems[i] = e
elif isinstance(e, OddEven):
odd=self.replaceTokens([e.odd,], canv, doc, smarty)[0]
even=self.replaceTokens([e.even,], canv, doc, smarty)[0]
elems[i]=OddEven(odd, even)
return elems
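    # Illustrative example (assumption): a footer paragraph reading
    # "Page ###Page### of ###Total###" becomes e.g. "Page iv of 12" here;
    # using ###Total### also sets client.mustMultiBuild so a second pass runs.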
def draw(self, pageobj, canv, doc, x, y, width, height):
self.totalpages = max(self.totalpages, doc.page)
items = self.prepared
if items:
self.replaceTokens(items, canv, doc, pageobj.smarty)
container = MyContainer()
container._content = items
container.width = width
container.height = height
container.drawOn(canv, x, y)
class FancyPage(PageTemplate):
""" A page template that handles changing layouts.
"""
def __init__(self, _id, _head, _foot, client):
self.client = client
self.styles = client.styles
self._head = HeaderOrFooter(_head, client=client)
self._foot = HeaderOrFooter(_foot, True, client)
self.smarty = client.smarty
self.show_frame = client.show_frame
self.image_cache = {}
PageTemplate.__init__(self, _id, [])
def draw_background(self, which, canv):
''' Draws a background and/or foreground image
on each page which uses the template.
Calculates the image one time, and caches
it for reuse on every page in the template.
How the background is drawn depends on the
--fit-background-mode option.
If desired, we could add code to push it around
on the page, using stylesheets to align and/or
set the offset.
'''
uri=self.template[which]
info = self.image_cache.get(uri)
if info is None:
fname, _, _ = MyImage.split_uri(uri)
if not os.path.exists(fname):
del self.template[which]
log.error("Missing %s image file: %s", which, uri)
return
try:
w, h, kind = MyImage.size_for_node(dict(uri=uri, ), self.client)
except ValueError:
# Broken image, return arbitrary stuff
uri=missing
w, h, kind = 100, 100, 'direct'
pw, ph = self.styles.pw, self.styles.ph
if self.client.background_fit_mode == 'center':
scale = min(1.0, 1.0 * pw / w, 1.0 * ph / h)
sw, sh = w * scale, h * scale
x, y = (pw - sw) / 2.0, (ph - sh) / 2.0
elif self.client.background_fit_mode == 'scale':
x, y = 0, 0
sw, sh = pw, ph
else:
log.error('Unknown background fit mode: %s'% self.client.background_fit_mode)
# Do scale anyway
x, y = 0, 0
sw, sh = pw, ph
bg = MyImage(uri, sw, sh, client=self.client)
self.image_cache[uri] = info = bg, x, y
bg, x, y = info
bg.drawOn(canv, x, y)
def is_left(self, page_num):
"""Default behavior is that the first page is on the left.
If the user has --first_page_on_right, the calculation is reversed.
"""
val = page_num % 2 == 1
if self.client.first_page_on_right:
val = not val
return val
def beforeDrawPage(self, canv, doc):
"""Do adjustments to the page according to where we are in the document.
* Gutter margins on left or right as needed
"""
global _counter, _counterStyle
styles = self.styles
self.tw = styles.pw - styles.lm - styles.rm - styles.gm
# What page template to use?
tname = canv.__dict__.get('templateName',
self.styles.firstTemplate)
self.template = self.styles.pageTemplates[tname]
canv.templateName=tname
doct = getattr(canv, '_doctemplate', None)
canv._doctemplate = None # to make _listWrapOn work
if doc.page == 1:
_counter = 0
_counterStyle = 'arabic'
_counter += 1
# Adjust text space accounting for header/footer
self.hh = self._head.prepare(self, canv, doc)
self.fh = self._foot.prepare(self, canv, doc)
canv._doctemplate = doct
self.hx = styles.lm
self.hy = styles.ph - styles.tm - self.hh
self.fx = styles.lm
self.fy = styles.bm
self.th = styles.ph - styles.tm - styles.bm - self.hh \
- self.fh - styles.ts - styles.bs
# Adjust gutter margins
if self.is_left(doc.page): # Left page
x1 = styles.lm
else: # Right page
x1 = styles.lm + styles.gm
y1 = styles.bm + self.fh + styles.bs
# If there is a background parameter for this page Template, draw it
if 'background' in self.template:
self.draw_background('background', canv)
self.frames = []
for frame in self.template['frames']:
self.frames.append(SmartFrame(self,
styles.adjustUnits(frame[0], self.tw) + x1,
styles.adjustUnits(frame[1], self.th) + y1,
styles.adjustUnits(frame[2], self.tw),
styles.adjustUnits(frame[3], self.th),
showBoundary=self.show_frame))
canv.firstSect = True
canv._pagenum = doc.page
for frame in self.frames:
frame._pagenum=doc.page
def afterDrawPage(self, canv, doc):
"""Draw header/footer."""
# Adjust for gutter margin
canv.addPageLabel(canv._pageNumber-1,numberingstyles[_counterStyle],_counter)
log.info('Page %s [%s]'%(_counter,doc.page))
if self.is_left(doc.page): # Left page
hx = self.hx
fx = self.fx
else: # Right Page
hx = self.hx + self.styles.gm
fx = self.fx + self.styles.gm
self._head.draw(self, canv, doc, hx, self.hy, self.tw, self.hh)
self._foot.draw(self, canv, doc, fx, self.fy, self.tw, self.fh)
# If there is a foreground parameter for this page Template, draw it
if 'foreground' in self.template:
self.draw_background('foreground', canv)
def parse_commandline():
parser = OptionParser()
parser.add_option('--config', dest='configfile', metavar='FILE',
help='Config file to use. Default=~/.rst2pdf/config')
parser.add_option('-o', '--output', dest='output', metavar='FILE',
help='Write the PDF to FILE')
def_ssheets = ','.join([expanduser(p) for p in
config.getValue("general", "stylesheets", "").split(',')])
parser.add_option('-s', '--stylesheets', dest='style',
type='string', action='append',
metavar='STYLESHEETS', default=[def_ssheets],
help='A comma-separated list of custom stylesheets. Default="%s"'
% def_ssheets)
def_sheetpath = os.pathsep.join([expanduser(p) for p in
config.getValue("general", "stylesheet_path", "").split(os.pathsep)])
parser.add_option('--stylesheet-path', dest='stylepath',
metavar='FOLDER%sFOLDER%s...%sFOLDER'%((os.pathsep, )*3),
default=def_sheetpath,
help='A list of folders to search for stylesheets,'
' separated using "%s". Default="%s"' %(os.pathsep, def_sheetpath))
def_compressed = config.getValue("general", "compressed", False)
parser.add_option('-c', '--compressed', dest='compressed',
action="store_true", default=def_compressed,
help='Create a compressed PDF. Default=%s'%def_compressed)
parser.add_option('--print-stylesheet', dest='printssheet',
action="store_true", default=False,
help='Print the default stylesheet and exit')
parser.add_option('--font-folder', dest='ffolder', metavar='FOLDER',
help='Search this folder for fonts. (Deprecated)')
def_fontpath = os.pathsep.join([expanduser(p) for p in
config.getValue("general", "font_path", "").split(os.pathsep)])
parser.add_option('--font-path', dest='fpath',
metavar='FOLDER%sFOLDER%s...%sFOLDER'%((os.pathsep, )*3),
default=def_fontpath,
help='A list of folders to search for fonts, separated using "%s".'
' Default="%s"' % (os.pathsep, def_fontpath))
def_baseurl = urlunparse(['file',os.getcwd()+os.sep,'','','',''])
parser.add_option('--baseurl', dest='baseurl', metavar='URL',
default=def_baseurl,
help='The base URL for relative URLs. Default="%s"'%def_baseurl)
def_lang = config.getValue("general", "language", 'en_US')
parser.add_option('-l', '--language', metavar='LANG',
default=def_lang, dest='language',
help='Language to be used for hyphenation'
' and docutils localizations. Default="%s"' % def_lang)
def_header = config.getValue("general", "header")
parser.add_option('--header', metavar='HEADER',
default=def_header, dest='header',
help='Page header if not specified in the document.'
' Default="%s"' % def_header)
def_footer = config.getValue("general", "footer")
parser.add_option('--footer', metavar='FOOTER',
default=def_footer, dest='footer',
help='Page footer if not specified in the document.'
' Default="%s"' % def_footer)
def_section_header_depth = config.getValue("general","section_header_depth",2)
parser.add_option('--section-header-depth', metavar='N',
default=def_section_header_depth, dest='section_header_depth',
help = '''Sections up to this depth will be used in the header and footer's replacement of ###Section###. Default=%s''' % def_section_header_depth)
def_smartquotes = config.getValue("general", "smartquotes", "0")
parser.add_option("--smart-quotes", metavar="VALUE",
default=def_smartquotes, dest="smarty",
help='Try to convert ASCII quotes, ellipses and dashes'
' to the typographically correct equivalent. For details,'
' read the man page or the manual. Default="%s"' % def_smartquotes)
def_fit = config.getValue("general", "fit_mode", "shrink")
parser.add_option('--fit-literal-mode', metavar='MODE',
default=def_fit, dest='fit_mode',
help='What to do when a literal is too wide. One of error,'
' overflow,shrink,truncate. Default="%s"' % def_fit)
def_fit_background = config.getValue("general", "background_fit_mode",
"center")
parser.add_option('--fit-background-mode', metavar='MODE',
default=def_fit_background, dest='background_fit_mode',
help='How to fit the background image to the page.'
' One of scale or center. Default="%s"' % def_fit_background)
parser.add_option('--inline-links', action="store_true",
dest='inlinelinks', default=False,
help='Shows target between parentheses instead of active link.')
parser.add_option('--repeat-table-rows', action="store_true",
dest='repeattablerows', default=False,
help='Repeats header row for each split table.')
def_raw_html = config.getValue("general", "raw_html", False)
parser.add_option('--raw-html', action="store_true",
dest='raw_html', default=def_raw_html,
                      help='Support embedding raw HTML. Default=%s' % def_raw_html)
parser.add_option('-q', '--quiet', action="store_true",
dest='quiet', default=False,
help='Print less information.')
parser.add_option('-v', '--verbose', action="store_true",
dest='verbose', default=False,
help='Print debug information.')
parser.add_option('--very-verbose', action="store_true",
dest='vverbose', default=False,
help='Print even more debug information.')
parser.add_option('--version', action="store_true",
dest='version', default=False,
help='Print version number and exit.')
def_footnote_backlinks = config.getValue("general",
"footnote_backlinks", True)
parser.add_option('--no-footnote-backlinks', action='store_false',
dest='footnote_backlinks', default=def_footnote_backlinks,
help='Disable footnote backlinks.'
' Default=%s' % str(not def_footnote_backlinks))
def_inline_footnotes = config.getValue("general",
"inline_footnotes", False)
parser.add_option('--inline-footnotes', action='store_true',
dest='inline_footnotes', default=def_inline_footnotes,
help='Show footnotes inline.'
' Default=%s' % str(not def_inline_footnotes))
def_real_footnotes = config.getValue("general",
"real_footnotes", False)
parser.add_option('--real-footnotes', action='store_true',
dest='real_footnotes', default=def_real_footnotes,
help='Show footnotes at the bottom of the page where they are defined.'
' Default=%s' % str(def_real_footnotes))
def_dpi = config.getValue("general", "default_dpi", 300)
parser.add_option('--default-dpi', dest='def_dpi', metavar='NUMBER',
default=def_dpi,
help='DPI for objects sized in pixels. Default=%d'%def_dpi)
parser.add_option('--show-frame-boundary', dest='show_frame',
action='store_true', default=False,
help='Show frame borders (only useful for debugging). Default=False')
parser.add_option('--disable-splittables', dest='splittables',
action='store_false', default=True,
help="Don't use splittable flowables in some elements."
" Only try this if you can't process a document any other way.")
def_break = config.getValue("general", "break_level", 0)
parser.add_option('-b', '--break-level', dest='breaklevel',
metavar='LEVEL', default=def_break,
help='Maximum section level that starts in a new page.'
' Default: %d' % def_break)
def_blankfirst = config.getValue("general", "blank_first_page", False)
parser.add_option('--blank-first-page', dest='blank_first_page',
action='store_true', default=def_blankfirst,
help='Add a blank page at the beginning of the document.')
def_first_page_on_right = config.getValue("general", "first_page_on_right", False)
parser.add_option('--first-page-on-right', dest='first_page_on_right',
action='store_true', default=def_first_page_on_right,
help='Two-sided book style (where first page starts on the right side)')
def_breakside = config.getValue("general", "break_side", 'any')
parser.add_option('--break-side', dest='breakside', metavar='VALUE',
default=def_breakside,
help='How section breaks work. Can be "even", and sections start'
' in an even page, "odd", and sections start in odd pages,'
' or "any" and sections start in the next page, be it even or odd.'
' See also the -b option.')
parser.add_option('--date-invariant', dest='invariant',
action='store_true', default=False,
help="Don't store the current date in the PDF."
" Useful mainly for the test suite,"
" where we don't want the PDFs to change.")
parser.add_option('-e', '--extension-module', dest='extensions', action="append", type="string",
default = ['vectorpdf'],
help="Add a helper extension module to this invocation of rst2pdf "
"(module must end in .py and be on the python path)")
def_cover = config.getValue("general", "custom_cover", 'cover.tmpl')
parser.add_option('--custom-cover', dest='custom_cover',
metavar='FILE', default= def_cover,
help='Template file used for the cover page. Default: %s'%def_cover)
def_floating_images = config.getValue("general", "floating_images", False)
parser.add_option('--use-floating-images', action='store_true', default=def_floating_images,
help='Makes images with :align: attribute work more like in rst2html. Default: %s'%def_floating_images,
dest='floating_images')
def_numbered_links = config.getValue("general", "numbered_links", False)
parser.add_option('--use-numbered-links', action='store_true', default=def_numbered_links,
help='When using numbered sections, adds the numbers to all links referring to the section headers. Default: %s'%def_numbered_links,
dest='numbered_links')
parser.add_option('--strip-elements-with-class', action='append', dest='strip_elements_with_classes',
metavar='CLASS', help='Remove elements with this CLASS from the output. Can be used multiple times.')
return parser
def main(_args=None):
"""Parse command line and call createPdf with the correct data."""
parser = parse_commandline()
# Fix issue 430: don't overwrite args
    # need to parse_args to see if we have a custom config file
options, args = parser.parse_args(copy(_args))
if options.configfile:
# If there is a config file, we need to reparse
# the command line because we have different defaults
config.parseConfig(options.configfile)
parser = parse_commandline()
options, args = parser.parse_args(copy(_args))
if options.version:
from rst2pdf import version
print version
sys.exit(0)
if options.quiet:
log.setLevel(logging.CRITICAL)
if options.verbose:
log.setLevel(logging.INFO)
if options.vverbose:
log.setLevel(logging.DEBUG)
if options.printssheet:
# find base path
if hasattr(sys, 'frozen'):
PATH = abspath(dirname(sys.executable))
else:
PATH = abspath(dirname(__file__))
print open(join(PATH, 'styles', 'styles.style')).read()
sys.exit(0)
filename = False
if len(args) == 0:
args = [ '-', ]
elif len(args) > 2:
log.critical('Usage: %s [ file.txt [ file.pdf ] ]', sys.argv[0])
sys.exit(1)
elif len(args) == 2:
if options.output:
log.critical('You may not give both "-o/--output" and second argument')
sys.exit(1)
options.output = args.pop()
close_infile = False
if args[0] == '-':
infile = sys.stdin
options.basedir=os.getcwd()
elif len(args) > 1:
log.critical('Usage: %s file.txt [ -o file.pdf ]', sys.argv[0])
sys.exit(1)
else:
filename = args[0]
options.basedir=os.path.dirname(os.path.abspath(filename))
try:
infile = open(filename)
close_infile = True
except IOError as e:
log.error(e)
sys.exit(1)
options.infile = infile
if options.output:
outfile = options.output
if outfile == '-':
outfile = sys.stdout
options.compressed = False
#we must stay quiet
log.setLevel(logging.CRITICAL)
else:
if filename:
if filename.endswith('.txt') or filename.endswith('.rst'):
outfile = filename[:-4] + '.pdf'
else:
outfile = filename + '.pdf'
else:
outfile = sys.stdout
options.compressed = False
#we must stay quiet
log.setLevel(logging.CRITICAL)
#/reportlab/pdfbase/pdfdoc.py output can
#be a callable (stringio, stdout ...)
options.outfile = outfile
ssheet = []
if options.style:
for l in options.style:
ssheet += l.split(',')
else:
ssheet = []
options.style = [x for x in ssheet if x]
fpath = []
if options.fpath:
fpath = options.fpath.split(os.pathsep)
if options.ffolder:
fpath.append(options.ffolder)
options.fpath = fpath
spath = []
if options.stylepath:
spath = options.stylepath.split(os.pathsep)
options.stylepath = spath
if options.real_footnotes:
options.inline_footnotes = True
if reportlab.Version < '2.3':
log.warning('You are using Reportlab version %s.'
' The suggested version is 2.3 or higher' % reportlab.Version)
if options.invariant:
patch_PDFDate()
patch_digester()
add_extensions(options)
RstToPdf(
stylesheets=options.style,
language=options.language,
header=options.header, footer=options.footer,
inlinelinks=options.inlinelinks,
breaklevel=int(options.breaklevel),
baseurl=options.baseurl,
fit_mode=options.fit_mode,
background_fit_mode = options.background_fit_mode,
smarty=str(options.smarty),
font_path=options.fpath,
style_path=options.stylepath,
repeat_table_rows=options.repeattablerows,
footnote_backlinks=options.footnote_backlinks,
inline_footnotes=options.inline_footnotes,
real_footnotes=options.real_footnotes,
def_dpi=int(options.def_dpi),
basedir=options.basedir,
show_frame=options.show_frame,
splittables=options.splittables,
blank_first_page=options.blank_first_page,
first_page_on_right=options.first_page_on_right,
breakside=options.breakside,
custom_cover=options.custom_cover,
floating_images=options.floating_images,
numbered_links=options.numbered_links,
raw_html=options.raw_html,
section_header_depth=int(options.section_header_depth),
strip_elements_with_classes=options.strip_elements_with_classes,
).createPdf(text=options.infile.read(),
source_path=options.infile.name,
output=options.outfile,
compressed=options.compressed)
if close_infile:
infile.close()
# Ugly hack that fixes Issue 335
reportlab.lib.utils.ImageReader.__deepcopy__ = lambda self,*x: copy(self)
def patch_digester():
''' Patch digester so that we can get the same results when image
filenames change'''
import reportlab.pdfgen.canvas as canvas
cache = {}
def _digester(s):
index = cache.setdefault(s, len(cache))
return 'rst2pdf_image_%s' % index
canvas._digester = _digester
def patch_PDFDate():
'''Patch reportlab.pdfdoc.PDFDate so the invariant dates work correctly'''
from reportlab.pdfbase import pdfdoc
import reportlab
class PDFDate(pdfdoc.PDFObject):
__PDFObject__ = True
        # gmt offset now supported
def __init__(self, invariant=True, ts=None, dateFormatter=None):
now = (2000,01,01,00,00,00,0)
self.date = now[:6]
self.dateFormatter = dateFormatter
def format(self, doc):
from time import timezone
dhh, dmm = timezone // 3600, (timezone % 3600) % 60
dfmt = self.dateFormatter or (
lambda yyyy,mm,dd,hh,m,s:
"D:%04d%02d%02d%02d%02d%02d%+03d'%02d'" % (yyyy,mm,dd,hh,m,s,0,0))
return pdfdoc.format(pdfdoc.PDFString(dfmt(*self.date)), doc)
pdfdoc.PDFDate = PDFDate
reportlab.rl_config.invariant = 1
def add_extensions(options):
extensions = []
for ext in options.extensions:
if not ext.startswith('!'):
extensions.append(ext)
continue
ext = ext[1:]
try:
extensions.remove(ext)
except ValueError:
log.warning('Could not remove extension %s -- no such extension installed' % ext)
else:
log.info('Removed extension %s' % ext)
options.extensions[:] = extensions
if not extensions:
return
class ModuleProxy(object):
def __init__(self):
self.__dict__ = globals()
createpdf = ModuleProxy()
for modname in options.extensions:
prefix, modname = os.path.split(modname)
path_given = prefix
if modname.endswith('.py'):
modname = modname[:-3]
path_given = True
if not prefix:
prefix = os.path.join(os.path.dirname(__file__), 'extensions')
if prefix not in sys.path:
sys.path.append(prefix)
prefix = os.getcwd()
if prefix not in sys.path:
sys.path.insert(0, prefix)
log.info('Importing extension module %s', repr(modname))
firstname = path_given and modname or (modname + '_r2p')
try:
try:
module = __import__(firstname, globals(), locals())
except ImportError as e:
if firstname != str(e).split()[-1]:
raise
module = __import__(modname, globals(), locals())
except ImportError as e:
if str(e).split()[-1] not in [firstname, modname]:
raise
raise SystemExit('\nError: Could not find module %s '
'in sys.path [\n %s\n]\nExiting...\n' %
(modname, ',\n '.join(sys.path)))
if hasattr(module, 'install'):
module.install(createpdf, options)
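# A minimal extension module sketch (hypothetical, for illustration only): a file
# such as "myext_r2p.py" placed in the extensions directory or on sys.path only
# needs an install() hook, which receives this module (wrapped in ModuleProxy)
# and the parsed options before PDF creation, e.g.:
#
#     def install(createpdf, options):
#         # tweak an option or register extra behaviour before the PDF is built
#         options.compressed = True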
def monkeypatch():
''' For initial test purposes, make reportlab 2.4 mostly perform like 2.3.
This allows us to compare PDFs more easily.
There are two sets of changes here:
1) rl_config.paraFontSizeHeightOffset = False
This reverts a change in reportlab that messes up a lot of docs.
We may want to keep this one in here, or at least figure out
the right thing to do. If we do NOT keep this one here,
we will have documents look different in RL2.3 than they do
in RL2.4. This is probably unacceptable.
2) Everything else (below the paraFontSizeHeightOffset line):
These change some behavior in reportlab that affects the
graphics content stream without affecting the actual output.
We can remove these changes after making sure we are happy
and the checksums are good.
'''
import reportlab
from reportlab import rl_config
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfdoc
if getattr(reportlab, 'Version', None) != '2.4':
return
# NOTE: THIS IS A REAL DIFFERENCE -- DEFAULT y-offset FOR CHARS CHANGES!!!
rl_config.paraFontSizeHeightOffset = False
# Fix the preamble. 2.4 winds up injecting an extra space, so we toast it.
def new_make_preamble(self):
self._old_make_preamble()
self._preamble = ' '.join(self._preamble.split())
Canvas._old_make_preamble = Canvas._make_preamble
Canvas._make_preamble = new_make_preamble
# A new optimization removes the CR/LF between 'endstream' and 'endobj'
# Remove it for comparison
pdfdoc.INDIRECTOBFMT = pdfdoc.INDIRECTOBFMT.replace('CLINEEND', 'LINEEND')
# By default, transparency is set, and by default, that changes PDF version
# to 1.4 in RL 2.4.
pdfdoc.PDF_SUPPORT_VERSION['transparency'] = 1,3
monkeypatch()
def publish_secondary_doctree(text, main_tree, source_path):
# This is a hack so the text substitutions defined
# in the document are available when we process the cover
# page. See Issue 322
dt = main_tree
# Add substitutions from the main doctree
class addSubsts(Transform):
default_priority = 219
def apply(self):
self.document.substitution_defs.update(dt.substitution_defs)
self.document.substitution_names.update(dt.substitution_names)
# Use an own reader to modify transformations done.
class Reader(standalone.Reader):
def get_transforms(self):
default = standalone.Reader.get_transforms(self)
return (default + [ addSubsts, ])
# End of Issue 322 hack
return docutils.core.publish_doctree(text,
reader = Reader(), source_path=source_path)
if __name__ == "__main__":
main(sys.argv[1:])
| 38.368735 | 155 | 0.579961 |
0feb7fdcedee598b43f07983d1e7685de126022e
| 3,205 |
py
|
Python
|
qiskit_nature/results/vibrational_structure_result.py
|
Cryoris/qiskit-nature
|
c24a85140eb514628e2b9b1a5f0e03a689f8ade7
|
[
"Apache-2.0"
] | null | null | null |
qiskit_nature/results/vibrational_structure_result.py
|
Cryoris/qiskit-nature
|
c24a85140eb514628e2b9b1a5f0e03a689f8ade7
|
[
"Apache-2.0"
] | null | null | null |
qiskit_nature/results/vibrational_structure_result.py
|
Cryoris/qiskit-nature
|
c24a85140eb514628e2b9b1a5f0e03a689f8ade7
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The vibrational structure result."""
import logging
from typing import List, Optional
import numpy as np
from qiskit.algorithms import AlgorithmResult
from .eigenstate_result import EigenstateResult
logger = logging.getLogger(__name__)
class VibrationalStructureResult(EigenstateResult):
"""The vibrational structure result."""
def __init__(self) -> None:
super().__init__()
self._algorithm_result: Optional[AlgorithmResult] = None
self._computed_vibrational_energies: Optional[np.ndarray] = None
self._num_occupied_modals_per_mode: Optional[List[float]] = None
@property
def algorithm_result(self) -> Optional[AlgorithmResult]:
""" Returns raw algorithm result """
return self._algorithm_result
@algorithm_result.setter
def algorithm_result(self, value: AlgorithmResult) -> None:
""" Sets raw algorithm result """
self._algorithm_result = value
# TODO we need to be able to extract the statevector or the optimal parameters that can
# construct the circuit of the GS from here (if the algorithm supports this)
@property
def computed_vibrational_energies(self) -> Optional[np.ndarray]:
""" Returns computed electronic part of ground state energy """
return self._computed_vibrational_energies
@computed_vibrational_energies.setter
def computed_vibrational_energies(self, value: np.ndarray) -> None:
""" Sets computed electronic part of ground state energy """
self._computed_vibrational_energies = value
@property
def num_occupied_modals_per_mode(self) -> Optional[List[float]]:
""" Returns the number of occupied modal per mode """
return self._num_occupied_modals_per_mode
@num_occupied_modals_per_mode.setter
def num_occupied_modals_per_mode(self, value: List[float]) -> None:
""" Sets measured number of modes """
self._num_occupied_modals_per_mode = value
def __str__(self) -> str:
""" Printable formatted result """
return '\n'.join(self.formatted())
def formatted(self) -> List[str]:
""" Formatted result as a list of strings """
lines = []
lines.append('=== GROUND STATE ENERGY ===')
lines.append(' ')
lines.append('* Vibrational ground state energy (cm^-1): {}'.
format(np.round(self.computed_vibrational_energies[0], 12)))
if len(self.num_occupied_modals_per_mode) > 0:
lines.append('The number of occupied modals is')
for i in range(len(self.num_occupied_modals_per_mode)):
lines.append('- Mode {}: {}'.format(i, self.num_occupied_modals_per_mode[i]))
return lines
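# Hedged usage sketch (illustrative only; the numeric values below are made up):
#
#     result = VibrationalStructureResult()
#     result.computed_vibrational_energies = np.asarray([2536.487975])
#     result.num_occupied_modals_per_mode = [2, 2, 2]
#     print(result)  # prints the ground-state summary produced by formatted()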
| 37.705882 | 91 | 0.697036 |
5cb71f035ce8ecbc7029d6f1bc9b9af93a27c046
| 646 |
py
|
Python
|
Lab3/persistentClient.py
|
LucaGuffanti/FCI
|
3a26b877d06939786eeca9749b99b5df9e270915
|
[
"Unlicense"
] | null | null | null |
Lab3/persistentClient.py
|
LucaGuffanti/FCI
|
3a26b877d06939786eeca9749b99b5df9e270915
|
[
"Unlicense"
] | null | null | null |
Lab3/persistentClient.py
|
LucaGuffanti/FCI
|
3a26b877d06939786eeca9749b99b5df9e270915
|
[
"Unlicense"
] | null | null | null |
# we want to terminate the connection when a single period '.' is sent
# persistent TCP client application
from socket import *
# identify the server's welcome socket as an (IP address, port) pair
serverName = '192.168.1.3'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
while True:
message = input('Enter a sentence (. to stop): ')
clientSocket.send(message.encode('utf-8'))
if message == '.':
break
modifiedMessage = clientSocket.recv(1024)
modifiedMessage = modifiedMessage.decode('utf-8')
print(modifiedMessage)
clientSocket.close()
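# A matching persistent TCP server is assumed to look roughly like the sketch
# below (hypothetical, not part of this file): it keeps one connection open and
# echoes each message back (upper-cased, as in the classic example) until the
# client sends the terminating '.'.
#
#     serverSocket = socket(AF_INET, SOCK_STREAM)
#     serverSocket.bind(('', 12000))
#     serverSocket.listen(1)
#     connectionSocket, addr = serverSocket.accept()
#     while True:
#         sentence = connectionSocket.recv(1024).decode('utf-8')
#         if sentence == '.':
#             break
#         connectionSocket.send(sentence.upper().encode('utf-8'))
#     connectionSocket.close()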
| 22.275862 | 68 | 0.718266 |
544aa480ceae8bc6fc79799d28e36280349591cd
| 15,487 |
py
|
Python
|
fred/turbine.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
fred/turbine.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
fred/turbine.py
|
TUDelft-DataDrivenControl/FRED
|
f837f4a126e693519fa5ab7c913cb26570ca5278
|
[
"MIT"
] | null | null | null |
from fenics import *
import fred.conf as conf
if conf.with_adjoint:
from fenics_adjoint import *
from pyadjoint import Block
from pyadjoint.overloaded_function import overload_function
import numpy as np
import scipy.interpolate
import logging
logger = logging.getLogger("cm.turbine")
import os
def read_rosco_curves():
filename = os.path.join(os.path.dirname(__file__),"../config/Cp_Ct_Cq.DTU10MW.txt")
with open(filename, "r") as f:
datafile = f.readlines()
for idx in range(len(datafile)):
if "Pitch angle" in datafile[idx]:
pitch_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "TSR vector" in datafile[idx]:
tsr_array = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Wind speed" in datafile[idx]:
wind_speed = np.loadtxt(filename, skiprows=idx + 1, max_rows=1)
if "Power coefficient" in datafile[idx]:
cp_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Thrust coefficient" in datafile[idx]:
ct_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
if "Torque coefficent" in datafile[idx]:
cq_array = np.loadtxt(filename, skiprows=idx + 2, max_rows=len(tsr_array))
pitch_grid, tsr_grid = np.meshgrid(pitch_array, tsr_array)
return pitch_grid, tsr_grid, ct_array, cp_array
def lookup_field(pitch_grid, tsr_grid, ct_array, cp_array):
# construct function space
sw_corner = Point(np.min(pitch_grid), np.min(tsr_grid))
ne_corner = Point(np.max(pitch_grid), np.max(tsr_grid))
(n_tsr, n_pitch) = pitch_grid.shape
# set function in function space
m = RectangleMesh(sw_corner, ne_corner, n_pitch + 1, n_tsr + 1)
fe = FiniteElement("Lagrange", m.ufl_cell(), 1)
fs = FunctionSpace(m, fe)
# assign values to function
dof_coords = fs.tabulate_dof_coordinates()
ct = Function(fs)
ct_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], ct_array, kind='linear')
ct_values = ct.vector().get_local()
cp = Function(fs)
cp_interp = scipy.interpolate.interp2d(pitch_grid[0, :], tsr_grid[:, 0], cp_array, kind='linear')
cp_values = cp.vector().get_local()
logger.warning("Limiting 0<=ct<=1 for axial induction calculations")
for idx in range(len(dof_coords)):
pitch, tsr = dof_coords[idx]
ct_values[idx] = np.min((np.max((ct_interp(pitch, tsr),0.)),1.))
cp_values[idx] = np.min((np.max((cp_interp(pitch, tsr),0.)),1.))
a = 0.5 - 0.5*(np.sqrt(1-ct_values[idx]))
# convert to local
ct_values[idx] = ct_values[idx] / (1 - a)
cp_values[idx] = cp_values[idx] / (1 - a)**2
ct.vector().set_local(ct_values)
cp.vector().set_local(cp_values)
# write ct and cp field to output file for visual inspection
# ct_file = File("ct.pvd")
# cp_file = File("cp.pvd")
# ct_file.write(ct)
# cp_file.write(cp)
return ct, cp
def get_coefficient(func, pitch, tsr):
return func(pitch, tsr)
backend_get_coefficient = get_coefficient
class CoefficientBlock(Block):
def __init__(self, func, pitch, tsr, **kwargs):
super(CoefficientBlock, self).__init__()
self.kwargs = kwargs
self.func = func
self.add_dependency(pitch)
self.add_dependency(tsr)
degree = func.function_space().ufl_element().degree()
family = func.function_space().ufl_element().family()
mesh = func.function_space().mesh()
if np.isin(family, ["CG", "Lagrange"]):
self.V = FunctionSpace(mesh, "DG", degree - 1)
else:
raise NotImplementedError(
"Not implemented for other elements than Lagrange")
def __str__(self):
return "CoefficientBlock"
def evaluate_adj_component(self, inputs, adj_inputs, block_variable, idx, prepared=None):
# output = get_derivative(inputs[0], inputs[1], idx) * adj_inputs[0]
grad_idx = project(self.func.dx(idx), self.V)
output = grad_idx(inputs[0], inputs[1]) * adj_inputs[0]
return output
def recompute_component(self, inputs, block_variable, idx, prepared):
return backend_get_coefficient(self.func, inputs[0], inputs[1])
get_coefficient = overload_function(get_coefficient, CoefficientBlock)
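# Hedged usage sketch (assumes the ROSCO look-up tables are available on disk):
# the overloaded get_coefficient evaluates the interpolated field at (pitch, tsr)
# while the CoefficientBlock above makes the result differentiable w.r.t. both.
#
#     pitch_grid, tsr_grid, ct_array, cp_array = read_rosco_curves()
#     ct_func, cp_func = lookup_field(pitch_grid, tsr_grid, ct_array, cp_array)
#     pitch, tsr = Constant(0.0), Constant(7.0)
#     ct_prime = get_coefficient(ct_func, pitch, tsr)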
class Turbine:
"""Wind turbine class"""
def __init__(self, position, yaw):
"""Initialise wind turbine
Args:
position (list of floats): [x, y] position of turbine in the wind farm (m)
yaw: initial turbine yaw angle (rad)
"""
logger.info("Initialising turbine at ({:5.0f}, {:5.0f})".format(position[0], position[1]))
self._position = position
self._yaw_ref = Constant(yaw)
if conf.par.turbine.yaw_rate_limit > 0:
self._yaw_rate_limit = Constant(conf.par.turbine.yaw_rate_limit)
else:
logger.info("Turbine has no rate limit.")
self._yaw_rate_limit = None
self._yaw = self._yaw_ref
self._radius = conf.par.turbine.radius
self._diameter = conf.par.turbine.diameter
self._area = pi * self._radius ** 2
self._thickness = conf.par.turbine.thickness
self._hub_height = conf.par.turbine.hub_height
self._axial_induction = Constant(conf.par.turbine.axial_induction)
self._pitch = Constant(0.)
self._tip_speed_ratio = Constant(0.)
self._torque = Constant(0.)
if conf.par.turbine.coefficients == "induction":
self._thrust_coefficient_prime = self._compute_ct_prime(self._axial_induction)
self._power_coefficient_prime = self._thrust_coefficient_prime * (1 - self._axial_induction)
elif conf.par.turbine.coefficients == "lut":
self._pitch.assign(conf.par.turbine.pitch) # todo: load from config file
# todo: implement first order turbine model for torque to tipspeed ratio
self._torque.assign(conf.par.turbine.torque)
self._tip_speed_ratio.assign(conf.par.turbine.torque)
# load ct and cp look-up table from file
pitch_grid, tsr_grid, ct_array, cp_array = read_rosco_curves()
self._ct_function, self._cp_function = lookup_field(pitch_grid, tsr_grid, ct_array, cp_array)
self._thrust_coefficient_prime = Constant(0.)
self._power_coefficient_prime = Constant(0.)
self._update_coefficients()
logger.warning("Setting turbine coefficients from LUT not yet fully implemented")
else:
raise KeyError(
"Invalid method for ct/cp calculations: {} is not defined".format(conf.par.turbine.coefficients))
self._force = None
self._power = None
self._velocity = None
self._kernel = None
def _compute_ct_prime(self, a):
"""Calculate thrust coefficient from axial induction.
Args:
a (float): axial induction factor (-)
"""
ct = 4 * a * (1 - a)
ctp = ct / pow((1 - a), 2)
return ctp
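# Worked example: for the Betz-optimal induction a = 1/3,
# ct = 4*(1/3)*(2/3) = 8/9 and ct' = (8/9)/(2/3)**2 = 2.0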
def _update_coefficients(self):
ct = get_coefficient(self._ct_function, self._pitch, self._tip_speed_ratio)
cp = get_coefficient(self._cp_function, self._pitch, self._tip_speed_ratio)
a = ct/4 #float(0.5 - 0.5 * sqrt(1-np.min((ct,1.))))
logger.info("Evaluating coefficients for pitch {:.2f}, tsr {:.2f} - ct {:.2f}, cp {:.2f}".format(float(self._pitch),
float(self._tip_speed_ratio),
ct*(1-a),
cp*(1-a)**2))
self._axial_induction.assign(a)
# self._axial_induction = 0.5 - 0.5 * sqrt(1-ct)
# ct = 1.
# cp = 0.4
# self._thrust_coefficient_prime =(ct / (1 - self._axial_induction))
# self._power_coefficient_prime =(cp / pow((1 - self._axial_induction), 2))
self._thrust_coefficient_prime.assign(ct) # / (1 - a))
self._power_coefficient_prime.assign(cp) #3 / pow((1 - a), 2))
def set_yaw_ref(self, new_yaw_ref):
"""Set the turbine to new yaw reference angle.
Assigns the specified values to the Dolfin `Constant` storing the yaw angle.
Args:
new_yaw_ref (float): new turbine yaw angle (rad)
"""
self._yaw_ref.assign(new_yaw_ref)
def compute_forcing(self, u):
"""Calculate the turbine forcing effect on the flow.
Args:
u (Function): vector velocity field
"""
if conf.par.simulation.dimensions == 2:
return self._compute_turbine_forcing_two_dim(u)
elif conf.par.simulation.dimensions == 3:
raise NotImplementedError("Three-dimensional turbine forcing not yet defined")
# return self._compute_force_three_dim(u)
else:
raise ValueError("Invalid dimension.")
def _compute_turbine_forcing_two_dim(self, u):
"""Computes two-dimensional turbine forcing based on Actuator-Disk Model.
Depending on the specification in the configuration file, the force is distributed using a kernel similar to
the work by R. King (2017), or using a conventional Gaussian kernel.
Args:
u (Function): two-dimensional vector velocity field
Returns:
Function: two-dimensional vector force field.
"""
force_constant = 0.5 * conf.par.flow.density * self._area * self._thrust_coefficient_prime
power_constant = 0.5 * conf.par.flow.density * self._area * self._power_coefficient_prime
ud = u[0] * - sin(self._yaw) + u[1] * - cos(self._yaw)
x = SpatialCoordinate(u)
# turbine position
xt = self._position[0]
yt = self._position[1]
# shift spatial coordinate
xs = x[0] - xt
ys = x[1] - yt
# rotate spatial coordinate
xr = -sin(self._yaw) * xs - cos(self._yaw) * ys
yr = cos(self._yaw) * xs - sin(self._yaw) * ys
# formulate forcing kernel
# 1.85544, 2.91452 are magic numbers that make kernel integrate to 1.
r = self._radius
w = self._thickness
gamma = 6
if conf.par.turbine.kernel == "king":
logger.info("Turbine forcing kernel as in work by R. King (2017)")
kernel = exp(-1 * pow(xr / w, gamma)) / (1.85544 * w) * \
exp(-1 * pow(pow(yr / r, 2), gamma)) / (2.91452 * pow(r, 2))
elif conf.par.turbine.kernel == "gaussian":
logger.info("Turbine forcing with gaussian distribution")
r = self._radius * 0.6
w = self._thickness
zr = 0
kernel = (exp(-1.0 * pow(xr / w, 6)) / (w * 1.85544)) * \
(exp(-0.5 * pow(yr / r, 2)) / (r * sqrt(2 * pi))) * \
(exp(-0.5 * pow(zr / r, 2)) / (r * sqrt(2 * pi)))
# compute forcing function with kernel
# scale = conf.par.turbine.deflection_scale
# axial_scale = conf.par.turbine.force_scale_axial
# transverse_scale = conf.par.turbine.force_scale_transverse
# # logger.info("Scaling force for wake deflection by factor {:.1f}".format(scale))
# logger.info("Scaling turbine force - axial : {:.2f} - transverse : {:.2f}".format(axial_scale, transverse_scale))
# forcing = -1 * force_constant * kernel * as_vector((-axial_scale*sin(self._yaw), -transverse_scale * cos(self._yaw))) * ud ** 2
forcing = force_constant * kernel * as_vector((sin(self._yaw), cos(self._yaw))) * ud ** 2
# todo: check this
power_scale = conf.par.turbine.power_scale
power = power_scale * power_constant * kernel * ud ** 3
# The above computation yields a two-dimensional body force.
# This is scaled to a 3D equivalent for output.
fscale = pi * 0.5 * self._radius
# todo: compute accurate 3D scaling factor
self._force = forcing * fscale
self._power = power * fscale
# self._power = - self._force*ud #dot(self._force, u)
self._kernel = kernel * fscale
self._velocity = u * kernel * fscale
return forcing
def get_yaw(self):
"""Get turbine yaw angle.
Returns:
float: turbine yaw angle (rad)
"""
return float(self._yaw)
def get_force(self):
"""Get current turbine force.
Performs integration of turbine forcing kernel over wind farm domain.
Returns:
list of floats: x and y component of turbine force (N)
"""
return [assemble(self._force[0] * dx), assemble(self._force[1] * dx)]
def get_power(self):
"""Get current turbine power.
Performs integration of power kernel over wind farm domain.
Returns:
float: turbine power (W)
"""
return assemble(self._power * dx)
def get_velocity(self):
# return [assemble(self._velocity * dx),-1]
return [assemble(self._velocity[0] * dx), assemble(self._velocity[1] * dx)]
def get_kernel(self):
"""Get the integrated kernel value.
Perform integration of kernel over wind farm domain. Kernel should integrate to 1.
Returns:
float: kernel size (-)
"""
return assemble(self._kernel * dx)
def get_axial_induction(self):
"""Get the axial induction factor.
Returns:
float: turbine axial induction factor (-)
"""
return float(self._axial_induction)
# def set_axial_induction(self, new_axial_induction):
# """Set the turbine to the new axial induction factor.
#
# Args:
# new_axial_induction (float): new axial induction factor (-)
#
# """
# self._axial_induction.assign(new_axial_induction)
def get_pitch(self):
return float(self._pitch)
def get_torque(self):
return float(self._torque)
def set_pitch_and_torque(self, new_pitch=0., new_torque=0.):
self._pitch.assign(new_pitch)
self._torque.assign(new_torque)
logger.warning("Linking torque control directly to TSR because turbine model not yet implemented")
#todo: implement first order turbine model
self._tip_speed_ratio.assign(new_torque)
if conf.par.turbine.coefficients == "lut":
self._update_coefficients()
def get_tip_speed_ratio(self):
return float(self._tip_speed_ratio)
def set_control(self, name, value):
if name == "yaw":
self._yaw_ref.assign(value)
elif name == "axial_induction":
self._axial_induction.assign(value)
elif name == "pitch":
self._pitch.assign(value)
if conf.par.turbine.coefficients == "lut":
self._update_coefficients()
elif name == "torque":
self._torque.assign(value)
logger.warning("Linking torque control directly to TSR because turbine model not yet implemented")
self._tip_speed_ratio.assign(value)
if conf.par.turbine.coefficients == "lut":
self._update_coefficients()
else:
raise ValueError("Control {} not known to turbine".format(name))
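# Hedged usage sketch: controls are applied by name, e.g.
#
#     turbine.set_control("yaw", 0.1)     # rad
#     turbine.set_control("pitch", 2.0)   # also refreshes ct/cp when coefficients == "lut"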
| 38.524876 | 137 | 0.613741 |
ae176ee5f87d531b3b0b840f2accc49f52cbdde7
| 18,808 |
py
|
Python
|
sympy/core/tests/test_sympify.py
|
harsh-98/sympy
|
53fc684467088cdf0acccb6ad770cbde97e32268
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/tests/test_sympify.py
|
harsh-98/sympy
|
53fc684467088cdf0acccb6ad770cbde97e32268
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/core/tests/test_sympify.py
|
harsh-98/sympy
|
53fc684467088cdf0acccb6ad770cbde97e32268
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy import (Symbol, exp, Integer, Float, sin, cos, log, Poly, Lambda,
Function, I, S, N, sqrt, srepr, Rational, Tuple, Matrix, Interval, Add, Mul,
Pow, Or, true, false, Abs, pi, Range)
from sympy.abc import x, y
from sympy.core.sympify import sympify, _sympify, SympifyError, kernS
from sympy.core.decorators import _sympifyit
from sympy.external import import_module
from sympy.utilities.pytest import raises, XFAIL, skip
from sympy.utilities.decorator import conserve_mpmath_dps
from sympy.geometry import Point, Line
from sympy.functions.combinatorial.factorials import factorial, factorial2
from sympy.abc import _clash, _clash1, _clash2
from sympy.core.compatibility import exec_, HAS_GMPY, PY3
from sympy.sets import FiniteSet, EmptySet
from sympy.tensor.array.dense_ndim_array import ImmutableDenseNDimArray
from sympy.external import import_module
import mpmath
numpy = import_module('numpy')
def test_issue_3538():
v = sympify("exp(x)")
assert v == exp(x)
assert type(v) == type(exp(x))
assert str(type(v)) == str(type(exp(x)))
def test_sympify1():
assert sympify("x") == Symbol("x")
assert sympify(" x") == Symbol("x")
assert sympify(" x ") == Symbol("x")
# issue 4877
n1 = Rational(1, 2)
assert sympify('--.5') == n1
assert sympify('-1/2') == -n1
assert sympify('-+--.5') == -n1
assert sympify('-.[3]') == Rational(-1, 3)
assert sympify('.[3]') == Rational(1, 3)
assert sympify('+.[3]') == Rational(1, 3)
assert sympify('+0.[3]*10**-2') == Rational(1, 300)
assert sympify('.[052631578947368421]') == Rational(1, 19)
assert sympify('.0[526315789473684210]') == Rational(1, 19)
assert sympify('.034[56]') == Rational(1711, 49500)
# options to make reals into rationals
assert sympify('1.22[345]', rational=True) == \
1 + Rational(22, 100) + Rational(345, 99900)
assert sympify('2/2.6', rational=True) == Rational(10, 13)
assert sympify('2.6/2', rational=True) == Rational(13, 10)
assert sympify('2.6e2/17', rational=True) == Rational(260, 17)
assert sympify('2.6e+2/17', rational=True) == Rational(260, 17)
assert sympify('2.6e-2/17', rational=True) == Rational(26, 17000)
assert sympify('2.1+3/4', rational=True) == \
Rational(21, 10) + Rational(3, 4)
assert sympify('2.234456', rational=True) == Rational(279307, 125000)
assert sympify('2.234456e23', rational=True) == 223445600000000000000000
assert sympify('2.234456e-23', rational=True) == \
Rational(279307, 12500000000000000000000000000)
assert sympify('-2.234456e-23', rational=True) == \
Rational(-279307, 12500000000000000000000000000)
assert sympify('12345678901/17', rational=True) == \
Rational(12345678901, 17)
assert sympify('1/.3 + x', rational=True) == Rational(10, 3) + x
# make sure longs in fractions work
assert sympify('222222222222/11111111111') == \
Rational(222222222222, 11111111111)
# ... even if they come from repetend notation
assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967)
# ... or from high precision reals
assert sympify('.1234567890123456', rational=True) == \
Rational(19290123283179, 156250000000000)
def test_sympify_Fraction():
try:
import fractions
except ImportError:
pass
else:
value = sympify(fractions.Fraction(101, 127))
assert value == Rational(101, 127) and type(value) is Rational
def test_sympify_gmpy():
if HAS_GMPY:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
value = sympify(gmpy.mpz(1000001))
assert value == Integer(1000001) and type(value) is Integer
value = sympify(gmpy.mpq(101, 127))
assert value == Rational(101, 127) and type(value) is Rational
@conserve_mpmath_dps
def test_sympify_mpmath():
value = sympify(mpmath.mpf(1.0))
assert value == Float(1.0) and type(value) is Float
mpmath.mp.dps = 12
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-12")) == True
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-13")) == False
mpmath.mp.dps = 6
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-5")) == True
assert sympify(
mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-6")) == False
assert sympify(mpmath.mpc(1.0 + 2.0j)) == Float(1.0) + Float(2.0)*I
def test_sympify2():
class A:
def _sympy_(self):
return Symbol("x")**3
a = A()
assert _sympify(a) == x**3
assert sympify(a) == x**3
assert a == x**3
def test_sympify3():
assert sympify("x**3") == x**3
assert sympify("x^3") == x**3
assert sympify("1/2") == Integer(1)/2
raises(SympifyError, lambda: _sympify('x**3'))
raises(SympifyError, lambda: _sympify('1/2'))
def test_sympify_keywords():
raises(SympifyError, lambda: sympify('if'))
raises(SympifyError, lambda: sympify('for'))
raises(SympifyError, lambda: sympify('while'))
raises(SympifyError, lambda: sympify('lambda'))
def test_sympify_float():
assert sympify("1e-64") != 0
assert sympify("1e-20000") != 0
def test_sympify_bool():
assert sympify(True) is true
assert sympify(False) is false
def test_sympyify_iterables():
ans = [Rational(3, 10), Rational(1, 5)]
assert sympify(['.3', '.2'], rational=True) == ans
assert sympify(tuple(['.3', '.2']), rational=True) == Tuple(*ans)
assert sympify(dict(x=0, y=1)) == {x: 0, y: 1}
assert sympify(['1', '2', ['3', '4']]) == [S(1), S(2), [S(3), S(4)]]
def test_sympify4():
class A:
def _sympy_(self):
return Symbol("x")
a = A()
assert _sympify(a)**3 == x**3
assert sympify(a)**3 == x**3
assert a == x
def test_sympify_text():
assert sympify('some') == Symbol('some')
assert sympify('core') == Symbol('core')
assert sympify('True') is True
assert sympify('False') is False
assert sympify('Poly') == Poly
assert sympify('sin') == sin
def test_sympify_function():
assert sympify('factor(x**2-1, x)') == -(1 - x)*(x + 1)
assert sympify('sin(pi/2)*cos(pi)') == -Integer(1)
def test_sympify_poly():
p = Poly(x**2 + x + 1, x)
assert _sympify(p) is p
assert sympify(p) is p
def test_sympify_factorial():
assert sympify('x!') == factorial(x)
assert sympify('(x+1)!') == factorial(x + 1)
assert sympify('(1 + y*(x + 1))!') == factorial(1 + y*(x + 1))
assert sympify('(1 + y*(x + 1)!)^2') == (1 + y*factorial(x + 1))**2
assert sympify('y*x!') == y*factorial(x)
assert sympify('x!!') == factorial2(x)
assert sympify('(x+1)!!') == factorial2(x + 1)
assert sympify('(1 + y*(x + 1))!!') == factorial2(1 + y*(x + 1))
assert sympify('(1 + y*(x + 1)!!)^2') == (1 + y*factorial2(x + 1))**2
assert sympify('y*x!!') == y*factorial2(x)
assert sympify('factorial2(x)!') == factorial(factorial2(x))
raises(SympifyError, lambda: sympify("+!!"))
raises(SympifyError, lambda: sympify(")!!"))
raises(SympifyError, lambda: sympify("!"))
raises(SympifyError, lambda: sympify("(!)"))
raises(SympifyError, lambda: sympify("x!!!"))
def test_sage():
# how to effectively test for the _sage_() method without having SAGE
# installed?
assert hasattr(x, "_sage_")
assert hasattr(Integer(3), "_sage_")
assert hasattr(sin(x), "_sage_")
assert hasattr(cos(x), "_sage_")
assert hasattr(x**2, "_sage_")
assert hasattr(x + y, "_sage_")
assert hasattr(exp(x), "_sage_")
assert hasattr(log(x), "_sage_")
def test_issue_3595():
assert sympify("a_") == Symbol("a_")
assert sympify("_a") == Symbol("_a")
def test_lambda():
x = Symbol('x')
assert sympify('lambda: 1') == Lambda((), 1)
assert sympify('lambda x: x') == Lambda(x, x)
assert sympify('lambda x: 2*x') == Lambda(x, 2*x)
assert sympify('lambda x, y: 2*x+y') == Lambda([x, y], 2*x + y)
def test_lambda_raises():
raises(SympifyError, lambda: sympify("lambda *args: args")) # args argument error
raises(SympifyError, lambda: sympify("lambda **kwargs: kwargs[0]")) # kwargs argument error
raises(SympifyError, lambda: sympify("lambda x = 1: x")) # Keyword argument error
with raises(SympifyError):
_sympify('lambda: 1')
def test_sympify_raises():
raises(SympifyError, lambda: sympify("fx)"))
def test__sympify():
x = Symbol('x')
f = Function('f')
# positive _sympify
assert _sympify(x) is x
assert _sympify(f) is f
assert _sympify(1) == Integer(1)
assert _sympify(0.5) == Float("0.5")
assert _sympify(1 + 1j) == 1.0 + I*1.0
class A:
def _sympy_(self):
return Integer(5)
a = A()
assert _sympify(a) == Integer(5)
# negative _sympify
raises(SympifyError, lambda: _sympify('1'))
raises(SympifyError, lambda: _sympify([1, 2, 3]))
def test_sympifyit():
x = Symbol('x')
y = Symbol('y')
@_sympifyit('b', NotImplemented)
def add(a, b):
return a + b
assert add(x, 1) == x + 1
assert add(x, 0.5) == x + Float('0.5')
assert add(x, y) == x + y
assert add(x, '1') == NotImplemented
@_sympifyit('b')
def add_raises(a, b):
return a + b
assert add_raises(x, 1) == x + 1
assert add_raises(x, 0.5) == x + Float('0.5')
assert add_raises(x, y) == x + y
raises(SympifyError, lambda: add_raises(x, '1'))
def test_int_float():
class F1_1(object):
def __float__(self):
return 1.1
class F1_1b(object):
"""
This class is still a float, even though it also implements __int__().
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
class F1_1c(object):
"""
This class is still a float, because it implements _sympy_()
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
def _sympy_(self):
return Float(1.1)
class I5(object):
def __int__(self):
return 5
class I5b(object):
"""
This class implements both __int__() and __float__(), so it will be
treated as Float in SymPy. One could change this behavior, by using
float(a) == int(a), but deciding that integer-valued floats represent
exact numbers is arbitrary and often not correct, so we do not do it.
If, in the future, we decide to do it anyway, the tests for I5b need to
be changed.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
class I5c(object):
"""
This class implements both __int__() and __float__(), but also
a _sympy_() method, so it will be Integer.
"""
def __float__(self):
return 5.0
def __int__(self):
return 5
def _sympy_(self):
return Integer(5)
i5 = I5()
i5b = I5b()
i5c = I5c()
f1_1 = F1_1()
f1_1b = F1_1b()
f1_1c = F1_1c()
assert sympify(i5) == 5
assert isinstance(sympify(i5), Integer)
assert sympify(i5b) == 5
assert isinstance(sympify(i5b), Float)
assert sympify(i5c) == 5
assert isinstance(sympify(i5c), Integer)
assert abs(sympify(f1_1) - 1.1) < 1e-5
assert abs(sympify(f1_1b) - 1.1) < 1e-5
assert abs(sympify(f1_1c) - 1.1) < 1e-5
assert _sympify(i5) == 5
assert isinstance(_sympify(i5), Integer)
assert _sympify(i5b) == 5
assert isinstance(_sympify(i5b), Float)
assert _sympify(i5c) == 5
assert isinstance(_sympify(i5c), Integer)
assert abs(_sympify(f1_1) - 1.1) < 1e-5
assert abs(_sympify(f1_1b) - 1.1) < 1e-5
assert abs(_sympify(f1_1c) - 1.1) < 1e-5
def test_evaluate_false():
cases = {
'2 + 3': Add(2, 3, evaluate=False),
'2**2 / 3': Mul(Pow(2, 2, evaluate=False), Pow(3, -1, evaluate=False), evaluate=False),
'2 + 3 * 5': Add(2, Mul(3, 5, evaluate=False), evaluate=False),
'2 - 3 * 5': Add(2, -Mul(3, 5, evaluate=False), evaluate=False),
'1 / 3': Mul(1, Pow(3, -1, evaluate=False), evaluate=False),
'True | False': Or(True, False, evaluate=False),
'1 + 2 + 3 + 5*3 + integrate(x)': Add(1, 2, 3, Mul(5, 3, evaluate=False), x**2/2, evaluate=False),
'2 * 4 * 6 + 8': Add(Mul(2, 4, 6, evaluate=False), 8, evaluate=False),
}
for case, result in cases.items():
assert sympify(case, evaluate=False) == result
def test_issue_4133():
a = sympify('Integer(4)')
assert a == Integer(4)
assert a.is_Integer
def test_issue_3982():
a = [3, 2.0]
assert sympify(a) == [Integer(3), Float(2.0)]
assert sympify(tuple(a)) == Tuple(Integer(3), Float(2.0))
assert sympify(set(a)) == FiniteSet(Integer(3), Float(2.0))
def test_S_sympify():
assert S(1)/2 == sympify(1)/2
assert (-2)**(S(1)/2) == sqrt(2)*I
def test_issue_4788():
assert srepr(S(1.0 + 0J)) == srepr(S(1.0)) == srepr(Float(1.0))
def test_issue_4798_None():
assert S(None) is None
def test_issue_3218():
assert sympify("x+\ny") == x + y
def test_issue_4988_builtins():
C = Symbol('C')
vars = {}
vars['C'] = C
exp1 = sympify('C')
assert exp1 == C # Make sure it did not get mixed up with sympy.C
exp2 = sympify('C', vars)
assert exp2 == C # Make sure it did not get mixed up with sympy.C
def test_geometry():
p = sympify(Point(0, 1))
assert p == Point(0, 1) and isinstance(p, Point)
L = sympify(Line(p, (1, 0)))
assert L == Line((0, 1), (1, 0)) and isinstance(L, Line)
def test_kernS():
s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'
# when 1497 is fixed, this no longer should pass: the expression
# should be unchanged
assert -1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) == -1
# sympification should not allow the constant to enter a Mul
# or else the structure can change dramatically
ss = kernS(s)
assert ss != -1 and ss.simplify() == -1
s = '-1 - 2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x)))'.replace(
'x', '_kern')
ss = kernS(s)
assert ss != -1 and ss.simplify() == -1
# issue 6687
assert kernS('Interval(-1,-2 - 4*(-3))') == Interval(-1, 10)
assert kernS('_kern') == Symbol('_kern')
assert kernS('E**-(x)') == exp(-x)
e = 2*(x + y)*y
assert kernS(['2*(x + y)*y', ('2*(x + y)*y',)]) == [e, (e,)]
assert kernS('-(2*sin(x)**2 + 2*sin(x)*cos(x))*y/2') == \
-y*(2*sin(x)**2 + 2*sin(x)*cos(x))/2
def test_issue_6540_6552():
assert S('[[1/3,2], (2/5,)]') == [[Rational(1, 3), 2], (Rational(2, 5),)]
assert S('[[2/6,2], (2/4,)]') == [[Rational(1, 3), 2], (Rational(1, 2),)]
assert S('[[[2*(1)]]]') == [[[2]]]
assert S('Matrix([2*(1)])') == Matrix([2])
def test_issue_6046():
assert str(S("Q & C", locals=_clash1)) == 'C & Q'
assert str(S('pi(x)', locals=_clash2)) == 'pi(x)'
assert str(S('pi(C, Q)', locals=_clash)) == 'pi(C, Q)'
locals = {}
exec_("from sympy.abc import Q, C", locals)
assert str(S('C&Q', locals)) == 'C & Q'
def test_issue_8821_highprec_from_str():
s = str(pi.evalf(128))
p = sympify(s)
assert Abs(sin(p)) < 1e-127
def test_issue_10295():
if not numpy:
skip("numpy not installed.")
A = numpy.array([[1, 3, -1],
[0, 1, 7]])
sA = S(A)
assert sA.shape == (2, 3)
for (ri, ci), val in numpy.ndenumerate(A):
assert sA[ri, ci] == val
B = numpy.array([-7, x, 3*y**2])
sB = S(B)
assert B[0] == -7
assert B[1] == x
assert B[2] == 3*y**2
C = numpy.arange(0, 24)
C.resize(2,3,4)
sC = S(C)
assert sC[0, 0, 0].is_integer
assert sC[0, 0, 0] == 0
a1 = numpy.array([1, 2, 3])
a2 = numpy.array([i for i in range(24)])
a2.resize(2, 4, 3)
assert sympify(a1) == ImmutableDenseNDimArray([1, 2, 3])
assert sympify(a2) == ImmutableDenseNDimArray([i for i in range(24)], (2, 4, 3))
def test_Range():
# Only works in Python 3 where range returns a range type
if PY3:
builtin_range = range
else:
builtin_range = xrange
assert sympify(builtin_range(10)) == Range(10)
assert _sympify(builtin_range(10)) == Range(10)
def test_sympify_set():
n = Symbol('n')
assert sympify({n}) == FiniteSet(n)
assert sympify(set()) == EmptySet()
def test_sympify_numpy():
if not numpy:
skip('numpy not installed. Abort numpy tests.')
np = numpy
def equal(x, y):
return x == y and type(x) == type(y)
assert sympify(np.bool_(1)) is S(True)
try:
assert equal(
sympify(np.int_(1234567891234567891)), S(1234567891234567891))
assert equal(
sympify(np.intp(1234567891234567891)), S(1234567891234567891))
except OverflowError:
# May fail on 32-bit systems: Python int too large to convert to C long
pass
assert equal(sympify(np.intc(1234567891)), S(1234567891))
assert equal(sympify(np.int8(-123)), S(-123))
assert equal(sympify(np.int16(-12345)), S(-12345))
assert equal(sympify(np.int32(-1234567891)), S(-1234567891))
assert equal(
sympify(np.int64(-1234567891234567891)), S(-1234567891234567891))
assert equal(sympify(np.uint8(123)), S(123))
assert equal(sympify(np.uint16(12345)), S(12345))
assert equal(sympify(np.uint32(1234567891)), S(1234567891))
assert equal(
sympify(np.uint64(1234567891234567891)), S(1234567891234567891))
assert equal(sympify(np.float32(1.123456)), Float(1.123456, precision=24))
assert equal(sympify(np.float64(1.1234567891234)),
Float(1.1234567891234, precision=53))
assert equal(sympify(np.longdouble(1.123456789)),
Float(1.123456789, precision=80))
assert equal(sympify(np.complex64(1 + 2j)), S(1.0 + 2.0*I))
assert equal(sympify(np.complex128(1 + 2j)), S(1.0 + 2.0*I))
assert equal(sympify(np.longcomplex(1 + 2j)), S(1.0 + 2.0*I))
try:
assert equal(sympify(np.float96(1.123456789)),
Float(1.123456789, precision=80))
except AttributeError: #float96 does not exist on all platforms
pass
try:
assert equal(sympify(np.float128(1.123456789123)),
Float(1.123456789123, precision=80))
except AttributeError: #float128 does not exist on all platforms
pass
@XFAIL
def test_sympify_rational_numbers_set():
ans = [Rational(3, 10), Rational(1, 5)]
assert sympify({'.3', '.2'}, rational=True) == FiniteSet(*ans)
| 31.087603 | 106 | 0.595704 |
b06f15b55274f93b6baaf3357575243970c6af00
| 232 |
py
|
Python
|
server/apps/main/admin.py
|
JKHeadley/django-aws-template
|
3133a8c2184bd85c3c4e7e185e1f5ca07fa31fc6
|
[
"MIT"
] | null | null | null |
server/apps/main/admin.py
|
JKHeadley/django-aws-template
|
3133a8c2184bd85c3c4e7e185e1f5ca07fa31fc6
|
[
"MIT"
] | null | null | null |
server/apps/main/admin.py
|
JKHeadley/django-aws-template
|
3133a8c2184bd85c3c4e7e185e1f5ca07fa31fc6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
class ContactMessageAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'email')
"""
Register Admin Pages
"""
admin.site.register(ContactMessage, ContactMessageAdmin)
| 19.333333 | 56 | 0.75 |
65d6bd5d9864e4fc366ae7c292ef1cef5bda0651
| 9,948 |
py
|
Python
|
app/services/simulation_service.py
|
MrDanCoelho/Desafio-Python
|
a7a65628e9e9d541f24b5c9684c247c7d5fa645d
|
[
"MIT"
] | null | null | null |
app/services/simulation_service.py
|
MrDanCoelho/Desafio-Python
|
a7a65628e9e9d541f24b5c9684c247c7d5fa645d
|
[
"MIT"
] | null | null | null |
app/services/simulation_service.py
|
MrDanCoelho/Desafio-Python
|
a7a65628e9e9d541f24b5c9684c247c7d5fa645d
|
[
"MIT"
] | null | null | null |
from app.domain.models.resultado_model import resultado_model
from app.domain.models.propriedade_model import propriedade_model
from app.domain.enums.enum_tipo_jogador import enum_tipo_jogador
from app.domain.models.jogador_model import jogador_model
import time
import random
class simulation_service:
def jogador_impulsivo(self, jogador: jogador_model, propriedade: propriedade_model) -> None:
"""Comportamento de um jogador impulsivo dentro da simulação de Banco Imobiliário
Args:
jogador (jogador_model): o jogador que esta fazendo a ação
propriedade (propriedade_model): a propriedade onde o jogador se encontra
"""
if jogador.saldo >= propriedade.valor_compra:
self.comprar_propriedade(jogador, propriedade)
def jogador_exigente(self, jogador: jogador_model, propriedade: propriedade_model) -> None:
"""Comportamento de um jogador exigente dentro da simulação de Banco Imobiliário
Args:
jogador (jogador_model): o jogador que esta fazendo a ação
propriedade (propriedade_model): a propriedade onde o jogador se encontra
"""
if jogador.saldo >= propriedade.valor_compra and propriedade.valor_aluguel >= 50:
self.comprar_propriedade(jogador, propriedade)
def jogador_cauteloso(self, jogador: jogador_model, propriedade: propriedade_model) -> None:
"""Comportamento de um jogador cauteloso dentro da simulação de Banco Imobiliário
Args:
jogador (jogador_model): o jogador que esta fazendo a ação
propriedade (propriedade_model): a propriedade onde o jogador se encontra
"""
if jogador.saldo >= propriedade.valor_compra and jogador.saldo >= 80:
self.comprar_propriedade(jogador, propriedade)
def jogador_aleatorio(self, jogador: jogador_model, propriedade: propriedade_model) -> None:
"""Comportamento de um jogador aleatório dentro da simulação de Banco Imobiliário
Args:
jogador (jogador_model): o jogador que esta fazendo a ação
propriedade (propriedade_model): a propriedade onde o jogador se encontra
"""
if jogador.saldo >= propriedade.valor_compra and random.randint(0, 1) == 1:  # randint gives the intended 50% chance; randrange(0, 1) always returns 0
self.comprar_propriedade(jogador, propriedade)
def comprar_propriedade(self, jogador: jogador_model, propriedade: propriedade_model) -> None:
"""Método para simularb a compra de uma propriedade dentro do Banco Imobiliário
Args:
jogador (jogador_model): o jogador que esta fazendo a compra
propriedade (propriedade_model): a propriedade que esta sendo comprada
"""
propriedade.proprietario = jogador
jogador.saldo -= propriedade.valor_compra
def simular(self) -> str:
"""Serviço de simulação de partidas de Banco Imobiliário
Returns:
str: string a ser exibida no Console com resultado da simulação
"""
resultado = ""
try:
# record the simulation start time
simulacao_comeco = time.time()
# create the object that will hold the results
resultado_partidas = resultado_model()
# build the player list from the registered player types
resultado_partidas.jogadores = list[jogador_model]()
for tj in enum_tipo_jogador:
resultado_partidas.jogadores.append(jogador_model(tj))
# create the properties according to the board size
propriedades = list[propriedade_model]()
for i in range(21):
propriedades.append(propriedade_model(i-1))
# simulate 300 matches
for i in range(300):
resultado_partidas = self.simular_partida(resultado_partidas, propriedades)
# before returning the results, sort the player list by number of wins
resultado_partidas.jogadores.sort(key=lambda j:(j.vitorias), reverse=True)
# record the simulation end time and compute the elapsed time
simulacao_fim = time.time()
resultado_partidas.tempo_execucao = simulacao_fim - simulacao_comeco
# return the result formatted for the console
resultado = get_console_string(resultado_partidas)
except Exception as e:
# if the simulation raises any kind of error, report the exception on the console
resultado = "Unexpected error: " + str(e)
return resultado
def simular_partida(self, resultado_partida: resultado_model, propriedades: list[propriedade_model]) -> resultado_model:
"""Método para simular uma partida inteira de Banco Imobiliário
Args:
resultado_partida (resultado_model): resultado da partida atual
propriedades (list[propriedade_model]): lista de propriedades do tabuleiro
Returns:
resultado_model: resultado da partida atualizado
"""
# each match can last at most 1000 turns
partidaFinalizada = False
# define the player order and reset the match parameters
jogadores_partida = resultado_partida.jogadores[:]
random.shuffle(jogadores_partida)
for j in jogadores_partida:
j.saldo = 300
j.casa_atual = 0
for p in propriedades:
p.proprietario = None
for i in range(1000):
resultado_partida.turnos_usados += 1
partidaFinalizada = self.simular_jogada(jogadores_partida, propriedades)
# if the turn ends with a winner, stop the loop
if partidaFinalizada:
resultado_partida.total_vitorias += 1
for r in resultado_partida.jogadores:
if(r.tipo == jogadores_partida[0].tipo):
r.vitorias += 1
break
break
# if the match has not finished after the 1000 turns, count it as a timeout
if partidaFinalizada == False:
resultado_partida.timeouts += 1
return resultado_partida
def simular_jogada(self, jogadores: list[jogador_model], propriedades: list[propriedade_model]) -> bool:
"""Método para simular um turno dentro do jogo de Banco Imobiliário
Args:
jogadores (list[jogador_model]): lista de jogadores da partida
propriedades (list[propriedade_model]): lista de propriedades do tabuleiro
Returns:
bool: status de vitória dentro do jogo
"""
partidaFinalizada = False
for j in jogadores:
# on each turn the player rolls a six-sided die
j.casa_atual += random.randint(1, 6)
# if the player goes past the last board position, they wrap around and receive 100 in balance
if j.casa_atual >= len(propriedades):
j.casa_atual -= len(propriedades)
j.saldo += 100
if(propriedades[j.casa_atual].proprietario != None):
if j.casa_atual != 0:
if(j.saldo >= propriedades[j.casa_atual].valor_aluguel):
propriedades[j.casa_atual].proprietario.saldo += propriedades[j.casa_atual].valor_aluguel
j.saldo -= propriedades[j.casa_atual].valor_aluguel
else:
propriedades[j.casa_atual].proprietario.saldo += j.saldo
jogadores.remove(j)
# if only one player remains, they win the match
if len(jogadores) == 1:
partidaFinalizada = True
break
else:
# map each player type to its strategy and invoke only the matching one
# (calling the methods while building the dict would execute every strategy eagerly)
acao_jogador = {
1 : self.jogador_impulsivo,
2 : self.jogador_exigente,
3 : self.jogador_cauteloso,
4 : self.jogador_aleatorio
}
acao_jogador[int(j.tipo)](j, propriedades[j.casa_atual])
return partidaFinalizada
def get_console_string(resultado: resultado_model) -> str:
"""Método de ajuda para formatar a resposta dada ao console
Args:
resultado (resultado_model): o resultado das partidas
Returns:
str: string a ser exibida no Console
"""
string_final = "\nSimulation completed in {0} seconds".format(str(resultado.tempo_execucao))
string_final += "\n\n"
string_final += "#-------------------------------------------------------------------------#\n"
string_final += "| Jogador | Vitórias |\n"
posicao = 0
for j in resultado.jogadores:
posicao += 1
posicao_str = "#{0} Jogador {1}".format(str(posicao), j.tipo.name)
vitoria_str = str(j.vitorias) + " (" +"%.2f" % (j.vitorias/resultado.total_vitorias * 100) + "%)"
string_final += "---------------------------------------------------------------------------\n"
string_final += "|" + f"{posicao_str:^36}" + "|" + f"{vitoria_str:^36}" + "|\n"
media_turnos_str = "%.2f" % (resultado.turnos_usados / 300)
string_final += "---------------------------------------------------------------------------\n"
string_final += "| MÉDIA DE TURNOS |" + f"{str(media_turnos_str):^36}" + "|\n"
string_final += "---------------------------------------------------------------------------\n"
string_final += "| TIMEOUTS |" + f"{str(resultado.timeouts):^36}" + "|\n"
string_final += "#-------------------------------------------------------------------------#\n"
return string_final
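# Hedged usage sketch (assumes the app wires this service into a console entry point
# and that the domain models above are importable):
#
#     if __name__ == "__main__":
#         service = simulation_service()
#         print(service.simular())  # runs the 300-match simulation and prints the summary table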
| 44.213333 | 124 | 0.602935 |
15ae2a43e15b412824b297ac8aa929f9ea29b4ed
| 2,122 |
py
|
Python
|
fim/graph/resources/networkx_arm.py
|
fabric-testbed/InformationModel
|
6f01e208b2cf5f8b721d16a48e4df97ce0b727bc
|
[
"MIT"
] | 6 |
2020-07-14T22:52:53.000Z
|
2021-09-25T09:20:25.000Z
|
fim/graph/resources/networkx_arm.py
|
fabric-testbed/InformationModel
|
6f01e208b2cf5f8b721d16a48e4df97ce0b727bc
|
[
"MIT"
] | 69 |
2020-10-09T10:32:36.000Z
|
2022-03-28T16:18:11.000Z
|
fim/graph/resources/networkx_arm.py
|
fabric-testbed/InformationModel
|
6f01e208b2cf5f8b721d16a48e4df97ce0b727bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Ilya Baldin ([email protected])
"""
NetworkX implementation of ARM (Aggregate Resource Model) functionality.
"""
import uuid
from ..networkx_property_graph import NetworkXPropertyGraph, NetworkXGraphImporter
from .abc_arm import ABCARMPropertyGraph
class NetworkXARMGraph(ABCARMPropertyGraph, NetworkXPropertyGraph):
def __init__(self, *, graph: NetworkXPropertyGraph, logger=None):
"""
Initialize NetworkX ARM - supply an implementation of a graph
:param graph:
"""
super().__init__(graph=graph, logger=logger)
class NetworkXARMFactory:
"""
Help convert graphs between formats so long as they are rooted in NetworkXPropertyGraph
"""
@staticmethod
def create(graph: NetworkXPropertyGraph) -> NetworkXARMGraph:
assert graph is not None
assert isinstance(graph.importer, NetworkXGraphImporter)
return NetworkXARMGraph(graph=graph,
logger=graph.log)
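# Hedged usage sketch: given an existing NetworkXPropertyGraph `graph` built through
# a NetworkXGraphImporter (the loading step is omitted, as its API is not shown here):
#
#     arm = NetworkXARMFactory.create(graph)  # wraps the graph with ARM semantics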
| 37.22807 | 91 | 0.744581 |
fc6ea967a894190f7ed6e90d6f5d4a07971e67de
| 37,591 |
py
|
Python
|
corehq/apps/locations/tests/test_location_fixtures.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1 |
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
corehq/apps/locations/tests/test_location_fixtures.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94 |
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
corehq/apps/locations/tests/test_location_fixtures.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import uuid
from collections import namedtuple
from datetime import datetime, timedelta
from xml.etree import cElementTree as ElementTree
from django.test import TestCase
import mock
from casexml.apps.phone.models import SimplifiedSyncLog
from casexml.apps.phone.restore import RestoreParams
from casexml.apps.phone.tests.utils import (
call_fixture_generator,
create_restore_user,
)
from corehq.apps.app_manager.tests.util import (
TestXmlMixin,
extract_xml_partial,
)
from corehq.apps.commtrack.tests.util import bootstrap_domain
from corehq.apps.custom_data_fields.models import (
CustomDataFieldsDefinition,
Field,
)
from corehq.apps.domain.models import Domain
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from corehq.apps.users.models import CommCareUser
from corehq.util.test_utils import flag_enabled, generate_cases
from ..fixtures import (
LocationSet,
_get_location_data_fields,
_location_to_fixture,
flat_location_fixture_generator,
get_location_fixture_queryset,
location_fixture_generator,
related_locations_fixture_generator,
should_sync_flat_fixture,
should_sync_hierarchical_fixture,
should_sync_locations,
)
from ..models import (
LocationFixtureConfiguration,
LocationRelation,
LocationType,
SQLLocation,
make_location,
)
from .util import (
LocationHierarchyTestCase,
LocationStructure,
LocationTypeStructure,
setup_location_types_with_structure,
setup_locations_with_structure,
)
EMPTY_LOCATION_FIXTURE_TEMPLATE = """
<fixture id='commtrack:locations' user_id='{}'>
<empty_element/>
</fixture>
"""
TEST_LOCATION_STRUCTURE = [
('Massachusetts', [
('Middlesex', [
('Cambridge', []),
('Somerville', []),
]),
('Suffolk', [
('Boston', []),
('Revere', []),
])
]),
('New York', [
('New York City', [
('Manhattan', []),
('Brooklyn', []),
('Queens', []),
]),
]),
]
class FixtureHasLocationsMixin(TestXmlMixin):
root = os.path.dirname(__file__)
file_path = ['data']
def _assemble_expected_fixture(self, xml_name, desired_locations):
ids = {
"{}_id".format(desired_location.lower().replace(" ", "_")): (
self.locations[desired_location].location_id
)
for desired_location in desired_locations
} # eg: {"massachusetts_id" = self.locations["Massachusetts"].location_id}
return self.get_xml(xml_name).decode('utf-8').format(
user_id=self.user.user_id,
**ids
)
# Adding this feature flag allows rendering of hierarchical fixture where requested
# and won't interfere with flat fixture generation
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def _assert_fixture_matches_file(self, xml_name, desired_locations, flat=False, related=False):
if flat:
generator = flat_location_fixture_generator
elif related:
generator = related_locations_fixture_generator
else:
generator = location_fixture_generator
fixture = ElementTree.tostring(call_fixture_generator(generator, self.user)[-1])
desired_fixture = self._assemble_expected_fixture(xml_name, desired_locations)
self.assertXmlEqual(desired_fixture, fixture)
def assert_fixture_queryset_equals_locations(self, desired_locations):
actual = get_location_fixture_queryset(self.user).values_list('name', flat=True)
self.assertItemsEqual(actual, desired_locations)
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class LocationFixturesTest(LocationHierarchyTestCase, FixtureHasLocationsMixin):
location_type_names = ['state', 'county', 'city']
location_structure = TEST_LOCATION_STRUCTURE
def setUp(self):
super(LocationFixturesTest, self).setUp()
self.user = create_restore_user(self.domain, 'user', '123')
def tearDown(self):
self.user._couch_user.delete(deleted_by=None)
for lt in self.location_types.values():
lt.expand_to = None
lt._expand_from_root = False
lt._expand_from = None
lt.include_without_expanding = None
lt.include_only.set([])
lt.save()
for loc in self.locations.values():
loc.location_type.refresh_from_db()
super(LocationFixturesTest, self).tearDown()
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def test_no_user_locations_returns_empty(self):
empty_fixture = EMPTY_LOCATION_FIXTURE_TEMPLATE.format(self.user.user_id)
fixture = ElementTree.tostring(call_fixture_generator(location_fixture_generator, self.user)[0])
self.assertXmlEqual(empty_fixture, fixture)
def test_metadata(self):
location_type = self.location_types['state']
location = SQLLocation(
id="854208",
domain="test-domain",
name="Braavos",
location_type=location_type,
metadata={
'best_swordsman': "Sylvio Forel",
'in_westeros': "false",
'appeared_in_num_episodes': 3,
},
)
location_db = LocationSet([location])
data_fields = [
Field(slug='best_swordsman'),
Field(slug='in_westeros'),
Field(slug='appeared_in_num_episodes'),
]
fixture = _location_to_fixture(location_db, location, location_type, data_fields)
location_data = {
e.tag: e.text for e in fixture.find('location_data')
}
self.assertEqual(location_data, {k: str(v) for k, v in location.metadata.items()})
def test_simple_location_fixture(self):
self.user._couch_user.set_location(self.locations['Suffolk'])
self._assert_fixture_matches_file(
'simple_fixture',
['Massachusetts', 'Suffolk', 'Boston', 'Revere']
)
def test_multiple_locations(self):
self.user._couch_user.add_to_assigned_locations(self.locations['Suffolk'])
self.user._couch_user.add_to_assigned_locations(self.locations['New York City'])
self._assert_fixture_matches_file(
'multiple_locations',
['Massachusetts', 'Suffolk', 'Boston', 'Revere', 'New York',
'New York City', 'Manhattan', 'Queens', 'Brooklyn']
)
def test_all_locations_flag_returns_all_locations(self):
with flag_enabled('SYNC_ALL_LOCATIONS'):
self._assert_fixture_matches_file(
'expand_from_root',
['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn']
)
def test_expand_to_county(self):
"""
expand to "county"
should return:
Mass
- Suffolk
"""
self.user._couch_user.set_location(self.locations['Suffolk'])
location_type = self.locations['Suffolk'].location_type
location_type.expand_to = location_type
location_type.save()
self._assert_fixture_matches_file(
'expand_to_county',
['Massachusetts', 'Suffolk']
)
def test_expand_to_county_from_state(self):
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.expand_to = self.locations['Suffolk'].location_type
location_type.save()
self._assert_fixture_matches_file(
'expand_to_county_from_state',
['Massachusetts', 'Suffolk', 'Middlesex']
)
def test_expand_from_county_at_city(self):
self.user._couch_user.set_location(self.locations['Boston'])
location_type = self.locations['Boston'].location_type
location_type.expand_from = self.locations['Suffolk'].location_type
location_type.save()
self._assert_fixture_matches_file(
'expand_from_county_at_city',
['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere']
)
def test_expand_from_root_at_city(self):
self.user._couch_user.set_location(self.locations['Boston'])
location_type = self.locations['Boston'].location_type
location_type.expand_from_root = True
location_type.save()
self._assert_fixture_matches_file(
'expand_from_root',
['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn']
)
def test_expand_from_root_to_county(self):
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.expand_from_root = True
location_type.expand_to = self.locations['Suffolk'].location_type
location_type.save()
self._assert_fixture_matches_file(
'expand_from_root_to_county',
['Massachusetts', 'Suffolk', 'Middlesex', 'New York', 'New York City']
)
def test_flat_sync_format(self):
with flag_enabled('SYNC_ALL_LOCATIONS'):
self._assert_fixture_matches_file(
'expand_from_root_flat',
['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn'],
flat=True,
)
def test_include_without_expanding(self):
self.user._couch_user.set_location(self.locations['Boston'])
location_type = self.locations['Boston'].location_type
location_type.expand_from = self.locations['Suffolk'].location_type
location_type.include_without_expanding = self.locations['Massachusetts'].location_type
location_type.save()
self._assert_fixture_matches_file(
'include_without_expanding',
['Massachusetts', 'Suffolk', 'Boston', 'Revere', 'New York']
)
def test_include_without_expanding_same_level(self):
# I want a list of all the counties, but only the cities in my county
self.user._couch_user.set_location(self.locations['Boston'])
location_type = self.locations['Boston'].location_type
# Get all the counties
location_type.include_without_expanding = self.locations['Middlesex'].location_type
# Expand downwards from my county
location_type.expand_from = self.locations['Middlesex'].location_type
location_type.save()
self._assert_fixture_matches_file(
'include_without_expanding_same_level',
['Massachusetts', 'New York', 'Middlesex', 'Suffolk', 'New York City', 'Boston', 'Revere']
) # (New York City is of type "county")
def test_include_without_expanding_lower_level(self):
        # I want all the cities, but am at the state level
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
# Get all the cities
location_type.include_without_expanding = self.locations['Revere'].location_type
location_type.save()
self._assert_fixture_matches_file(
'expand_from_root', # This is the same as expanding from root / getting all locations
['Massachusetts', 'Suffolk', 'Middlesex', 'Boston', 'Revere', 'Cambridge',
'Somerville', 'New York', 'New York City', 'Manhattan', 'Queens', 'Brooklyn']
)
def test_include_only_location_types(self):
        # I want all the cities, but am at the state level
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.include_only.set([self.location_types['state'], self.location_types['county']])
location_type.save()
# include county and state
self.assert_fixture_queryset_equals_locations(
['Massachusetts', 'Suffolk', 'Middlesex']
)
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def test_include_only_location_types_hierarchical(self):
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.include_only.set([self.location_types['state'], self.location_types['county']])
location_type.save()
self._assert_fixture_matches_file(
'expand_to_county_from_state',
['Massachusetts', 'Suffolk', 'Middlesex']
)
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class ForkedHierarchiesTest(TestCase, FixtureHasLocationsMixin):
def setUp(self):
super(ForkedHierarchiesTest, self).setUp()
self.domain = 'test'
self.domain_obj = bootstrap_domain(self.domain)
self.addCleanup(self.domain_obj.delete)
self.user = create_restore_user(self.domain, 'user', '123')
location_type_structure = [
LocationTypeStructure('ctd', [
LocationTypeStructure('sto', [
LocationTypeStructure('cto', [
LocationTypeStructure('dto', [
LocationTypeStructure('tu', [
LocationTypeStructure('phi', []),
LocationTypeStructure('dmc', []),
]),
])
]),
LocationTypeStructure('drtb', []),
LocationTypeStructure('cdst', []),
])
])
]
location_structure = [
LocationStructure('CTD', 'ctd', [
LocationStructure('STO', 'sto', [
LocationStructure('CTO', 'cto', [
LocationStructure('DTO', 'dto', [
LocationStructure('TU', 'tu', [
LocationStructure('PHI', 'phi', []),
LocationStructure('DMC', 'dmc', []),
]),
])
]),
LocationStructure('DRTB', 'drtb', []),
LocationStructure('CDST', 'cdst', []),
]),
LocationStructure('STO1', 'sto', [
LocationStructure('CTO1', 'cto', [
LocationStructure('DTO1', 'dto', [
LocationStructure('TU1', 'tu', [
LocationStructure('PHI1', 'phi', []),
LocationStructure('DMC1', 'dmc', []),
]),
])
]),
LocationStructure('DRTB1', 'drtb', []),
LocationStructure('CDST1', 'cdst', []),
])
])
]
location_metadata = {'is_test': 'no', 'nikshay_code': 'nikshay_code'}
        setup_location_types_with_structure(self.domain, location_type_structure)
self.locations = setup_locations_with_structure(self.domain, location_structure, location_metadata)
def tearDown(self):
delete_all_users()
super(ForkedHierarchiesTest, self).tearDown()
def test_include_without_expanding_includes_all_ancestors(self):
self.user._couch_user.set_location(self.locations['DTO'])
location_type = self.locations['DTO'].location_type
location_type.include_without_expanding = self.locations['DTO'].location_type
location_type.save()
fixture = ElementTree.tostring(call_fixture_generator(flat_location_fixture_generator, self.user)[-1]).decode('utf-8')
for location_name in ('CDST1', 'CDST', 'DRTB1', 'DRTB', 'DTO1', 'DTO', 'CTO', 'CTO1', 'CTD'):
self.assertTrue(location_name in fixture)
for location_name in ('PHI1', 'TU1', 'DMC1'):
self.assertFalse(location_name in fixture)
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class LocationFixturesDataTest(LocationHierarchyTestCase, FixtureHasLocationsMixin):
location_type_names = ['state', 'county', 'city']
location_structure = [
('Massachusetts', [
('Middlesex', [
('Cambridge', []),
('Somerville', []),
]),
('Suffolk', [
('Boston', []),
('Revere', []),
])
]),
]
@classmethod
def setUpClass(cls):
super(LocationFixturesDataTest, cls).setUpClass()
cls.user = create_restore_user(cls.domain, 'user', '123')
cls.loc_fields = CustomDataFieldsDefinition.get_or_create(cls.domain, LocationFieldsView.field_type)
cls.loc_fields.set_fields([
Field(slug='baseball_team'),
Field(slug='favorite_pastime'),
])
cls.loc_fields.save()
cls.field_slugs = [f.slug for f in cls.loc_fields.get_fields()]
def setUp(self):
# this works around the fact that get_locations_to_sync is memoized on OTARestoreUser
self.user = self.user._couch_user.to_ota_restore_user()
@classmethod
def tearDownClass(cls):
cls.loc_fields.delete()
cls.user._couch_user.delete(deleted_by=None)
super(LocationFixturesDataTest, cls).tearDownClass()
def test_utility_method(self):
self.assertItemsEqual(self.field_slugs, [f.slug for f in _get_location_data_fields(self.domain)])
def test_utility_method_empty(self):
self.assertEqual([], [f.slug for f in _get_location_data_fields('no-fields-defined')])
def test_metadata_added_to_all_nodes(self):
mass = self.locations['Massachusetts']
self.user._couch_user.set_location(mass)
fixture = call_fixture_generator(flat_location_fixture_generator, self.user)[1] # first node is index
location_nodes = fixture.findall('locations/location')
self.assertEqual(7, len(location_nodes))
for location_node in location_nodes:
location_data_nodes = [child for child in location_node.find('location_data')]
self.assertEqual(2, len(location_data_nodes))
tags = {n.tag for n in location_data_nodes}
self.assertItemsEqual(tags, self.field_slugs)
def test_additional_metadata_not_included(self):
mass = self.locations['Massachusetts']
mass.metadata = {'driver_friendliness': 'poor'}
mass.save()
def _clear_metadata():
mass.metadata = {}
mass.save()
self.addCleanup(_clear_metadata)
self.user._couch_user.set_location(mass)
fixture = call_fixture_generator(flat_location_fixture_generator, self.user)[1] # first node is index
mass_data = [
field for field in fixture.find('locations/location[@id="{}"]/location_data'.format(mass.location_id))
]
self.assertEqual(2, len(mass_data))
self.assertItemsEqual(self.field_slugs, [f.tag for f in mass_data])
def test_existing_metadata_works(self):
mass = self.locations['Massachusetts']
mass.metadata = {'baseball_team': 'Red Sox'}
mass.save()
def _clear_metadata():
mass.metadata = {}
mass.save()
self.addCleanup(_clear_metadata)
self.user._couch_user.set_location(mass)
fixture = call_fixture_generator(flat_location_fixture_generator, self.user)[1] # first node is index
self.assertEqual(
'Red Sox',
fixture.find(
'locations/location[@id="{}"]/location_data/baseball_team'.format(mass.location_id)
).text
)
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class WebUserLocationFixturesTest(LocationHierarchyTestCase, FixtureHasLocationsMixin):
location_type_names = ['state', 'county', 'city']
location_structure = TEST_LOCATION_STRUCTURE
def setUp(self):
super(WebUserLocationFixturesTest, self).setUp()
delete_all_users()
self.user = create_restore_user(self.domain, 'web_user', '123', is_mobile_user=False)
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def test_no_user_locations_returns_empty(self):
empty_fixture = EMPTY_LOCATION_FIXTURE_TEMPLATE.format(self.user.user_id)
fixture = ElementTree.tostring(call_fixture_generator(location_fixture_generator, self.user)[0])
self.assertXmlEqual(empty_fixture, fixture)
def test_simple_location_fixture(self):
self.user._couch_user.set_location(self.domain, self.locations['Suffolk'])
self._assert_fixture_matches_file(
'simple_fixture',
['Massachusetts', 'Suffolk', 'Boston', 'Revere']
)
def test_multiple_locations(self):
self.user._couch_user.add_to_assigned_locations(self.domain, self.locations['Suffolk'])
self.user._couch_user.add_to_assigned_locations(
self.domain,
self.locations['New York City']
)
self._assert_fixture_matches_file(
'multiple_locations',
['Massachusetts', 'Suffolk', 'Boston', 'Revere', 'New York',
'New York City', 'Manhattan', 'Queens', 'Brooklyn']
)
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class ForkedHierarchyLocationFixturesTest(TestCase, FixtureHasLocationsMixin):
"""
- State
- County
- City
- Region
- Town
"""
domain = 'forked-hierarchy-domain'
location_type_structure = [
LocationTypeStructure('state', [
LocationTypeStructure('county', [
LocationTypeStructure('city', [])
]),
LocationTypeStructure('region', [
LocationTypeStructure('town', [])
])
])
]
location_structure = [
LocationStructure('Massachusetts', 'state', [
LocationStructure('Middlesex', 'county', [
LocationStructure('Cambridge', 'city', []),
LocationStructure('Somerville', 'city', [])
]),
LocationStructure('Suffolk', 'county', [
LocationStructure('Boston', 'city', []),
]),
LocationStructure('Berkshires', 'region', [
LocationStructure('Granville', 'town', []),
LocationStructure('Granby', 'town', []),
]),
LocationStructure('Pioneer Valley', 'region', [
LocationStructure('Greenfield', 'town', []),
]),
])
]
def setUp(self):
self.domain_obj = bootstrap_domain(self.domain)
self.user = create_restore_user(self.domain, 'user', '123')
self.location_types = setup_location_types_with_structure(self.domain, self.location_type_structure)
self.locations = setup_locations_with_structure(self.domain, self.location_structure)
def tearDown(self):
self.domain_obj.delete()
def test_forked_locations(self, *args):
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.expand_to = self.locations['Middlesex'].location_type
location_type.save()
self._assert_fixture_matches_file(
'forked_expand_to_county',
['Massachusetts', 'Suffolk', 'Middlesex', 'Berkshires', 'Pioneer Valley']
)
def test_include_only_location_types(self):
self.user._couch_user.set_location(self.locations['Massachusetts'])
location_type = self.locations['Massachusetts'].location_type
location_type.include_only.set([
self.location_types['state'],
self.location_types['county'],
self.location_types['city'],
])
location_type.save()
# include county and state
self.assert_fixture_queryset_equals_locations([
'Massachusetts',
'Middlesex',
'Cambridge',
'Somerville',
'Suffolk',
'Boston',
])
@flag_enabled("RELATED_LOCATIONS")
@mock.patch.object(Domain, 'uses_locations', lambda: True) # removes dependency on accounting
class RelatedLocationFixturesTest(LocationHierarchyTestCase, FixtureHasLocationsMixin):
"""
- State
- County
- City
"""
location_type_names = ['state', 'county', 'city']
location_structure = TEST_LOCATION_STRUCTURE
@classmethod
def setUpClass(cls):
super(RelatedLocationFixturesTest, cls).setUpClass()
cls.user = create_restore_user(cls.domain, 'user', '123')
cls.relation = LocationRelation.objects.create(
location_a=cls.locations["Cambridge"],
location_b=cls.locations["Boston"]
)
@classmethod
def tearDownClass(cls):
cls.user._couch_user.delete(deleted_by=None)
super(RelatedLocationFixturesTest, cls).tearDownClass()
def tearDown(self):
self.user._couch_user.reset_locations([])
def test_related_locations(self, *args):
self.user._couch_user.add_to_assigned_locations(self.locations['Boston'])
self._assert_fixture_matches_file(
'related_location_flat_fixture',
['Massachusetts', 'Middlesex', 'Cambridge', 'Boston', 'Suffolk'],
flat=True
)
self._assert_fixture_matches_file(
'related_location',
['Boston', 'Cambridge'],
related=True
)
def test_related_locations_parent_location(self, *args):
# verify that being assigned to a parent location pulls in sub location's relations
self.user._couch_user.add_to_assigned_locations(self.locations['Middlesex'])
self._assert_fixture_matches_file(
'related_location_flat_fixture',
['Massachusetts', 'Middlesex', 'Cambridge', 'Boston', 'Suffolk'],
flat=True
)
self._assert_fixture_matches_file(
'related_location',
['Boston', 'Cambridge'],
related=True
)
def test_related_locations_with_distance(self, *args):
self.user._couch_user.add_to_assigned_locations(self.locations['Boston'])
self.relation.distance = 5
self.relation.save()
self.addCleanup(lambda: LocationRelation.objects.filter(pk=self.relation.pk).update(distance=None))
self._assert_fixture_matches_file(
'related_location_with_distance_flat_fixture',
['Massachusetts', 'Middlesex', 'Cambridge', 'Boston', 'Suffolk'],
flat=True
)
self._assert_fixture_matches_file(
'related_location_with_distance',
['Boston', 'Cambridge'],
related=True
)
def test_should_sync_when_changed(self, *args):
self.user._couch_user.add_to_assigned_locations(self.locations['Boston'])
last_sync_time = datetime.utcnow()
sync_log = SimplifiedSyncLog(date=last_sync_time)
locations_queryset = SQLLocation.objects.filter(pk=self.locations['Boston'].pk)
restore_state = MockRestoreState(self.user, RestoreParams())
self.assertFalse(should_sync_locations(sync_log, locations_queryset, restore_state))
self.assertEqual(
len(call_fixture_generator(related_locations_fixture_generator, self.user, last_sync=sync_log)), 0)
LocationRelation.objects.create(location_a=self.locations["Revere"], location_b=self.locations["Boston"])
self.assertTrue(should_sync_locations(SimplifiedSyncLog(date=last_sync_time), locations_queryset, restore_state))
# length 2 for index definition + data
self.assertEqual(
len(call_fixture_generator(related_locations_fixture_generator, self.user, last_sync=sync_log)), 2)
def test_force_empty_when_user_has_no_locations(self, *args):
sync_log = SimplifiedSyncLog(date=datetime.utcnow())
# no relations have been touched since this SimplifiedSyncLog, but it still pushes down the empty list
self.assertEqual(
len(call_fixture_generator(related_locations_fixture_generator, self.user, last_sync=sync_log)), 2)
class ShouldSyncLocationFixturesTest(TestCase):
@classmethod
def setUpClass(cls):
super(ShouldSyncLocationFixturesTest, cls).setUpClass()
delete_all_users()
cls.domain = "Erebor"
cls.domain_obj = create_domain(cls.domain)
cls.username = "Durins Bane"
cls.location_type = LocationType(
domain=cls.domain,
name="state",
code="state",
)
password = "What have I got in my pocket"
cls.user = CommCareUser.create(cls.domain, cls.username, password, None, None)
cls.user.save()
cls.location_type.save()
@classmethod
def tearDownClass(cls):
cls.domain_obj.delete()
super(ShouldSyncLocationFixturesTest, cls).tearDownClass()
def test_should_sync_locations_change_location_type(self):
"""
When location_type gets changed, we should resync locations
"""
yesterday = datetime.today() - timedelta(1)
day_before_yesterday = yesterday - timedelta(1)
LocationType.objects.all().update(last_modified=day_before_yesterday) # Force update because of auto_now
self.location_type = LocationType.objects.last()
location = SQLLocation(
domain=self.domain,
name="Meereen",
location_type=self.location_type,
metadata={'queen': "Daenerys Targaryen",
'rebels': "Sons of the Harpy"},
)
location.save()
SQLLocation.objects.filter(pk=location.pk).update(last_modified=day_before_yesterday)
location = SQLLocation.objects.last()
locations_queryset = SQLLocation.objects.filter(pk=location.pk)
restore_state = MockRestoreState(self.user.to_ota_restore_user(), RestoreParams())
self.assertFalse(
should_sync_locations(SimplifiedSyncLog(date=yesterday), locations_queryset, restore_state)
)
self.location_type.shares_cases = True
self.location_type.save()
location = SQLLocation.objects.last()
locations_queryset = SQLLocation.objects.filter(pk=location.pk)
self.assertTrue(
should_sync_locations(SimplifiedSyncLog(date=yesterday), locations_queryset, restore_state)
)
def test_archiving_location_should_resync(self):
"""
When locations are archived, we should resync them
"""
location = make_location(
domain=self.domain,
name='winterfell',
location_type=self.location_type.name,
)
location.save()
after_save = datetime.utcnow()
self.assertEqual('winterfell', location.name)
locations_queryset = SQLLocation.objects.filter(pk=location.pk)
restore_state = MockRestoreState(self.user.to_ota_restore_user(), RestoreParams())
# Should not resync if last sync was after location save
self.assertFalse(
should_sync_locations(SimplifiedSyncLog(date=after_save), locations_queryset, restore_state)
)
# archive the location
location.archive()
after_archive = datetime.utcnow()
location = SQLLocation.objects.last()
locations_queryset = SQLLocation.objects.filter(pk=location.pk)
# Should resync if last sync was after location was saved but before location was archived
self.assertTrue(
should_sync_locations(SimplifiedSyncLog(date=after_save), locations_queryset, restore_state)
)
        # Should not resync if last sync was after location was archived
self.assertFalse(
should_sync_locations(SimplifiedSyncLog(date=after_archive), locations_queryset, restore_state)
)
def test_changed_build_id(self):
app = MockApp('project_default', 'build_1')
restore_state = MockRestoreState(self.user.to_ota_restore_user(), RestoreParams(app=app))
sync_log_from_old_app = SimplifiedSyncLog(date=datetime.utcnow(), build_id=app.get_id)
self.assertFalse(
should_sync_locations(sync_log_from_old_app, SQLLocation.objects.all(), restore_state)
)
new_build = MockApp('project_default', 'build_2')
restore_state = MockRestoreState(self.user.to_ota_restore_user(), RestoreParams(app=new_build))
self.assertTrue(
should_sync_locations(sync_log_from_old_app, SQLLocation.objects.all(), restore_state)
)
MockApp = namedtuple("MockApp", ["location_fixture_restore", "get_id"])
MockRestoreState = namedtuple("MockRestoreState", ["restore_user", "params"])
@mock.patch('corehq.apps.domain.models.Domain.uses_locations', lambda: True)
class LocationFixtureSyncSettingsTest(TestCase):
@classmethod
def setUpClass(cls):
super(LocationFixtureSyncSettingsTest, cls).setUpClass()
cls.domain_obj = Domain(name=uuid.uuid4().hex)
cls.domain_obj.save()
@classmethod
def tearDownClass(cls):
cls.domain_obj.delete()
super(LocationFixtureSyncSettingsTest, cls).tearDownClass()
def test_should_sync_hierarchical_format_default(self):
self.assertEqual(False, should_sync_hierarchical_fixture(self.domain_obj, app=None))
def test_should_sync_flat_format_default(self):
self.assertEqual(True, should_sync_flat_fixture(self.domain_obj, app=None))
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def test_sync_format_with_toggle_enabled(self):
        # In prep for the migration to the flat fixture as the default, values are set for domains
        # which have locations and do not have the old FF FLAT_LOCATION_FIXTURE enabled
conf = LocationFixtureConfiguration.for_domain(self.domain_obj.name)
conf.sync_hierarchical_fixture = True
conf.sync_flat_fixture = False # default value
conf.save()
# stay on hierarchical by default
self.assertEqual(True, should_sync_hierarchical_fixture(self.domain_obj, app=None))
self.assertEqual(False, should_sync_flat_fixture(self.domain_obj, app=None))
# when domains are tested for migration by switching conf
conf.sync_hierarchical_fixture = False
conf.sync_flat_fixture = True # default value
conf.save()
self.assertEqual(False, should_sync_hierarchical_fixture(self.domain_obj, app=None))
self.assertEqual(True, should_sync_flat_fixture(self.domain_obj, app=None))
def test_sync_format_with_disabled_toggle(self):
self.assertEqual(False, should_sync_hierarchical_fixture(self.domain_obj, app=None))
self.assertEqual(True, should_sync_flat_fixture(self.domain_obj, app=None))
        # Ideally this should not happen, since the conf cannot be set without having
        # HIERARCHICAL_LOCATION_FIXTURE enabled. But consider a domain that has sync_hierarchical_fixture
        # set to False without the FF HIERARCHICAL_LOCATION_FIXTURE; in that case the domain stays on the
        # flat fixture format
conf = LocationFixtureConfiguration.for_domain(self.domain_obj.name)
conf.sync_hierarchical_fixture = False
conf.sync_flat_fixture = True # default value
conf.save()
self.assertEqual(False, should_sync_hierarchical_fixture(self.domain_obj, app=None))
self.assertEqual(True, should_sync_flat_fixture(self.domain_obj, app=None))
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
def test_sync_format_with_app_aware_project_default(self):
app = MockApp(location_fixture_restore='project_default', get_id="build")
conf = LocationFixtureConfiguration.for_domain(self.domain_obj.name)
conf.sync_hierarchical_fixture = True
conf.sync_flat_fixture = False
conf.save()
self.assertTrue(should_sync_hierarchical_fixture(self.domain_obj, app))
self.assertFalse(should_sync_flat_fixture(self.domain_obj, app))
@generate_cases([
('both_fixtures', True, True),
('only_flat_fixture', True, False),
('only_hierarchical_fixture', False, True),
], LocationFixtureSyncSettingsTest)
@flag_enabled('HIERARCHICAL_LOCATION_FIXTURE')
@mock.patch('corehq.apps.domain.models.Domain.uses_locations', lambda: True)
def test_sync_format(self, fixture_restore_type, sync_flat, sync_hierarchical):
app = MockApp(location_fixture_restore=fixture_restore_type, get_id="build")
conf = LocationFixtureConfiguration.for_domain(self.domain_obj.name)
conf.sync_hierarchical_fixture = not sync_hierarchical
conf.sync_flat_fixture = not sync_flat
conf.save()
self.assertIs(should_sync_hierarchical_fixture(self.domain_obj, app), sync_hierarchical)
self.assertIs(should_sync_flat_fixture(self.domain_obj, app), sync_flat)
| 40.595032 | 126 | 0.656381 |
12ea67d8fcaa8bad424993e9716a65194a1a355a | 16,035 | py | Python | test/parallel/test_tensorflow_keras.py | zarzen/horovod | d3c93d7b97c6158f003dad9aa377fe2bbf194e38 | ["Apache-2.0"] | 3 | 2021-01-31T11:18:18.000Z | 2021-06-01T03:01:24.000Z | test/parallel/test_tensorflow_keras.py | zarzen/horovod | d3c93d7b97c6158f003dad9aa377fe2bbf194e38 | ["Apache-2.0"] | 6 | 2019-10-22T17:19:13.000Z | 2020-05-20T20:53:02.000Z | test/parallel/test_tensorflow_keras.py | zarzen/horovod | d3c93d7b97c6158f003dad9aa377fe2bbf194e38 | ["Apache-2.0"] | 4 | 2019-10-03T20:40:16.000Z | 2020-11-17T15:23:28.000Z |
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for horovod.tensorflow.keras."""
import math
import numpy as np
import pytest
import tensorflow as tf
import warnings
from distutils.version import LooseVersion
from tensorflow import keras
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
import horovod.tensorflow.keras as hvd
from common import temppath
@pytest.mark.skipif(LooseVersion(tf.__version__) >= LooseVersion('2.0.0'), reason='TensorFlow v1 tests')
class TfKerasTests(tf.test.TestCase):
"""
Tests for ops in horovod.tensorflow.keras.
"""
def __init__(self, *args, **kwargs):
super(TfKerasTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
hvd.init()
self.config = tf.compat.v1.ConfigProto()
self.config.gpu_options.allow_growth = True
self.config.gpu_options.visible_device_list = str(hvd.local_rank())
def train_model(self, backward_passes_per_step):
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = keras.optimizers.RMSprop(lr=0.0001)
opt = hvd.DistributedOptimizer(
opt,
backward_passes_per_step=backward_passes_per_step,
average_aggregated_gradients=True)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.ThresholdedReLU(0.5))
model.compile(loss=keras.losses.mean_squared_error,
optimizer=opt,
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
def generator():
while 1:
yield (x, y)
# No assertions, we just need to verify that it doesn't hang
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
model.fit_generator(generator(),
steps_per_epoch=10,
callbacks=callbacks,
epochs=0,
verbose=0,
workers=4,
initial_epoch=1)
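    # Rough sketch of what the two optimizer arguments above control (assuming
    # current Horovod semantics): backward_passes_per_step=N accumulates
    # gradients locally over N batches before the allreduce/apply step, and
    # average_aggregated_gradients=True divides the accumulated sum by N so the
    # applied update is the per-batch average rather than the sum.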
def test_train_model(self):
self.train_model(backward_passes_per_step=1)
def test_train_model_with_gradient_aggregation(self):
self.train_model(backward_passes_per_step=2)
def test_sparse_as_dense(self):
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = keras.optimizers.RMSprop(lr=0.0001)
opt = hvd.DistributedOptimizer(opt, sparse_as_dense=True)
model = keras.models.Sequential()
model.add(keras.layers.Embedding(1000, 64, input_length=10))
model.compile(loss=keras.losses.mean_squared_error,
optimizer=opt)
x = np.random.randint(1000, size=(32, 10))
y = np.random.random((32, 10, 64))
# No assertions, we just need to verify that it doesn't hang
model.train_on_batch(x, y)
def test_load_model(self):
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = keras.optimizers.RMSprop(lr=0.0001)
opt = hvd.DistributedOptimizer(opt)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=opt,
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
with temppath() as fname:
model.save(fname)
new_model = hvd.load_model(fname)
new_opt = new_model.optimizer
self.assertEqual(type(new_opt).__module__, 'horovod._keras')
self.assertEqual(type(new_opt).__name__, 'RMSprop')
self.assertEqual(K.get_value(opt.lr), K.get_value(new_opt.lr))
self._check_optimizer_weights(opt, new_opt)
def test_load_model_custom_optimizers(self):
class TestOptimizer(keras.optimizers.RMSprop):
def __init__(self, **kwargs):
super(TestOptimizer, self).__init__(**kwargs)
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = TestOptimizer(lr=0.0001)
opt = hvd.DistributedOptimizer(opt)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=opt,
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
with temppath() as fname:
model.save(fname)
custom_optimizers = [TestOptimizer]
new_model = hvd.load_model(fname, custom_optimizers=custom_optimizers)
new_opt = new_model.optimizer
self.assertEqual(type(new_opt).__module__, 'horovod._keras')
self.assertEqual(type(new_opt).__name__, 'TestOptimizer')
self._check_optimizer_weights(opt, new_opt)
def test_load_model_custom_objects(self):
class TestOptimizer(keras.optimizers.RMSprop):
def __init__(self, **kwargs):
super(TestOptimizer, self).__init__(**kwargs)
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = TestOptimizer(lr=0.0001)
opt = hvd.DistributedOptimizer(opt)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=opt,
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
with temppath() as fname:
model.save(fname)
custom_objects = {
'TestOptimizer': lambda **kwargs: hvd.DistributedOptimizer(
TestOptimizer(**kwargs))
}
new_model = hvd.load_model(fname, custom_objects=custom_objects)
new_opt = new_model.optimizer
self.assertEqual(type(new_opt).__module__, 'horovod._keras')
self.assertEqual(type(new_opt).__name__, 'TestOptimizer')
self.assertEqual(K.get_value(opt.lr), K.get_value(new_opt.lr))
self._check_optimizer_weights(opt, new_opt)
def test_load_model_broadcast(self):
def create_model():
opt = keras.optimizers.SGD(lr=0.01 * hvd.size(), momentum=0.9)
opt = hvd.DistributedOptimizer(opt)
model = keras.models.Sequential()
model.add(keras.layers.Dense(2, input_shape=(3,)))
model.add(keras.layers.RepeatVector(3))
model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
model.compile(loss=keras.losses.MSE,
optimizer=opt,
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal')
return model
with temppath() as fname:
with self.session(config=self.config) as sess:
K.set_session(sess)
model = create_model()
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
if hvd.rank() == 0:
model.save(fname)
K.clear_session()
with self.session(config=self.config) as sess:
K.set_session(sess)
weight = np.random.random((1, 3))
if hvd.rank() == 0:
model = hvd.load_model(fname)
else:
model = create_model()
def generator():
while 1:
yield (x, y, weight)
if hvd.rank() == 0:
self.assertEqual(len(model.optimizer.weights), 5)
else:
self.assertEqual(len(model.optimizer.weights), 0)
# No assertions, we just need to verify that it doesn't hang
callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0)]
model.fit_generator(generator(),
steps_per_epoch=1,
callbacks=callbacks,
epochs=1,
verbose=0,
workers=4,
initial_epoch=0)
self.assertEqual(len(model.optimizer.weights), 5)
def _check_optimizer_weights(self, opt, new_opt):
self.assertEqual(len(opt.get_weights()), len(new_opt.get_weights()))
for weights, new_weights in zip(opt.get_weights(),
new_opt.get_weights()):
if np.isscalar(weights):
self.assertEqual(weights, new_weights)
else:
self.assertListEqual(weights.tolist(), new_weights.tolist())
def test_from_config(self):
with self.test_session(config=self.config) as sess:
K.set_session(sess)
opt = keras.optimizers.Adam()
hopt = hvd.DistributedOptimizer(opt)
cfg = hopt.get_config()
hopt_copy1 = hopt.from_config(cfg)
self.assertEqual(cfg, hopt_copy1.get_config())
hopt_copy2 = hopt.__class__.from_config(cfg)
self.assertEqual(cfg, hopt_copy2.get_config())
def test_elastic_state(self):
with self.test_session(config=self.config) as sess:
K.set_session(sess)
v = 1.0 if hvd.rank() == 0 else 2.0
model1 = tf.keras.Sequential([
tf.keras.layers.Dense(2, activation='softmax')
])
model1.build((2, 2))
model1.set_weights(
[np.array([[v, v], [v, v]], dtype=np.float32),
np.array([v, v], dtype=np.float32)])
model2 = tf.keras.Sequential([
tf.keras.layers.Dense(2, activation='softmax')
])
model2.build((2, 2))
model2.set_weights(
[np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32),
np.array([0.0, 0.0], dtype=np.float32)])
optimizer = tf.keras.optimizers.Adam(0.001 * hvd.size())
state = hvd.elastic.KerasState(model1, optimizer, batch=20 + hvd.rank(), epoch=10 + hvd.rank())
state.sync()
model1_weights = model1.get_weights()
model2_weights = model2.get_weights()
# After sync, all values should match the root rank
for w in state.model.get_weights():
self.assertAllClose(w, np.ones_like(w))
assert state.batch == 20
assert state.epoch == 10
# Partially modify then restore
model1.set_weights(model2_weights)
state.batch = 21
state.epoch = 11
state.restore()
for w1, w2 in zip(model1.get_weights(), model1_weights):
self.assertAllClose(w1, w2)
assert state.batch == 20
assert state.epoch == 10
# Partially modify then commit
model1.set_weights(model2_weights)
state.batch = 21
state.epoch = 11
state.commit()
state.restore()
for w1, w2 in zip(model1.get_weights(), model2_weights):
self.assertAllClose(w1, w2)
assert state.batch == 21
assert state.epoch == 11
def test_gradient_aggregation(self):
with self.test_session(config=self.config) as sess:
class TestingOptimizer(optimizer_v2.OptimizerV2):
"""
Custom optimizer we use for testing gradient aggregation.
"""
def get_config(self):
config = super(TestingOptimizer, self).get_config()
return config
def _create_slots(self, var_list):
pass
def _resource_apply_dense(self, grad, var, apply_state=None):
return var.assign_add(grad)
K.set_session(sess)
session = tf.compat.v1.keras.backend.get_session(op_input_list=())
backward_passes_per_step = 4
hvd_optimizer = hvd.DistributedOptimizer(
optimizer=TestingOptimizer("test"),
backward_passes_per_step=backward_passes_per_step,
average_aggregated_gradients=True,
)
iterations = hvd_optimizer.iterations
session.run(iterations.initializer)
def compute_expected_value(batch_id):
sum_per_aggregation = 0.0
for _ in range(backward_passes_per_step):
grads_for_batch = 0.0
for rank in range(hvd.size()):
grads_for_batch += rank
# Apply `average_aggregated_gradients`.
grads_for_batch /= float(backward_passes_per_step)
# Averages across workers.
sum_per_aggregation += grads_for_batch / float(hvd.size())
aggregations_completed = math.floor((batch_id + 1) / backward_passes_per_step)
return aggregations_completed * sum_per_aggregation
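            # Worked example for the helper above, assuming 2 workers (ranks 0
            # and 1) and backward_passes_per_step=4: each pass sums the ranks
            # (0 + 1 = 1), so one completed aggregation contributes
            # 4 * (1 / 4 / 2) = 0.5, and after 10 batches
            # (floor(10 / 4) = 2 aggregations) the variable should hold 1.0.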
grads = [tf.constant([float(hvd.rank())])]
variables = [tf.Variable([0.0])]
session.run(variables[0].initializer)
allreduce_op = hvd_optimizer._allreduce(grads)
grads_and_vars = [(allreduce_op[0], variables[0])]
apply_grads_op = hvd_optimizer.apply_gradients(grads_and_vars)
for idx in range(10):
_ = session.run(apply_grads_op)
assert idx + 1 == session.run(hvd_optimizer.iterations)
assert session.run(variables[0].read_value()) == compute_expected_value(idx)
| 38.638554 | 107 | 0.564266 |
ca307617ebe6c5165262ed3a09b1e91470229299 | 13,138 | py | Python | analytics/test/client.py | gokulsg/analytics-python | ab5c9188c21b16dc9b280f2d2123461bfec0c9bf | ["Unlicense", "MIT"] | 1 | 2021-05-13T21:52:37.000Z | 2021-05-13T21:52:37.000Z | analytics/test/client.py | gokulsg/analytics-python | ab5c9188c21b16dc9b280f2d2123461bfec0c9bf | ["Unlicense", "MIT"] | null | null | null | analytics/test/client.py | gokulsg/analytics-python | ab5c9188c21b16dc9b280f2d2123461bfec0c9bf | ["Unlicense", "MIT"] | null | null | null |
from datetime import date, datetime
import unittest
import time
import six
import mock
from analytics.version import VERSION
from analytics.client import Client
class TestClient(unittest.TestCase):
def fail(self, e, batch):
"""Mark the failure handler"""
self.failed = True
def setUp(self):
self.failed = False
self.client = Client('testsecret', on_error=self.fail)
def test_requires_write_key(self):
self.assertRaises(AssertionError, Client)
def test_empty_flush(self):
self.client.flush()
def test_basic_track(self):
client = self.client
success, msg = client.track('userId', 'python test event')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['event'], 'python test event')
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertTrue(isinstance(msg['messageId'], str))
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['properties'], {})
self.assertEqual(msg['type'], 'track')
def test_stringifies_user_id(self):
# A large number that loses precision in node:
# node -e "console.log(157963456373623802 + 1)" > 157963456373623800
client = self.client
success, msg = client.track(
user_id=157963456373623802, event='python test event')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['userId'], '157963456373623802')
self.assertEqual(msg['anonymousId'], None)
def test_stringifies_anonymous_id(self):
# A large number that loses precision in node:
# node -e "console.log(157963456373623803 + 1)" > 157963456373623800
client = self.client
success, msg = client.track(
anonymous_id=157963456373623803, event='python test event')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['userId'], None)
self.assertEqual(msg['anonymousId'], '157963456373623803')
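    # The precision claim in the two tests above is easy to verify: IEEE-754
    # doubles (and therefore JavaScript numbers) represent integers exactly
    # only up to 2**53 = 9007199254740992, and 157963456373623802 is well past
    # that, which is why the ids are transmitted as strings.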
def test_advanced_track(self):
client = self.client
success, msg = client.track(
'userId', 'python test event', {'property': 'value'},
{'ip': '192.168.0.1'}, datetime(2014, 9, 3), 'anonymousId',
{'Amplitude': True}, 'messageId')
self.assertTrue(success)
self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
self.assertEqual(msg['properties'], {'property': 'value'})
self.assertEqual(msg['integrations'], {'Amplitude': True})
self.assertEqual(msg['context']['ip'], '192.168.0.1')
self.assertEqual(msg['event'], 'python test event')
self.assertEqual(msg['anonymousId'], 'anonymousId')
self.assertEqual(msg['context']['library'], {
'name': 'analytics-python',
'version': VERSION
})
self.assertEqual(msg['messageId'], 'messageId')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'track')
def test_basic_identify(self):
client = self.client
success, msg = client.identify('userId', {'trait': 'value'})
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['traits'], {'trait': 'value'})
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertTrue(isinstance(msg['messageId'], str))
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'identify')
def test_advanced_identify(self):
client = self.client
success, msg = client.identify(
'userId', {'trait': 'value'}, {'ip': '192.168.0.1'},
datetime(2014, 9, 3), 'anonymousId', {'Amplitude': True},
'messageId')
self.assertTrue(success)
self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
self.assertEqual(msg['integrations'], {'Amplitude': True})
self.assertEqual(msg['context']['ip'], '192.168.0.1')
self.assertEqual(msg['traits'], {'trait': 'value'})
self.assertEqual(msg['anonymousId'], 'anonymousId')
self.assertEqual(msg['context']['library'], {
'name': 'analytics-python',
'version': VERSION
})
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertEqual(msg['messageId'], 'messageId')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'identify')
def test_basic_group(self):
client = self.client
success, msg = client.group('userId', 'groupId')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['groupId'], 'groupId')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'group')
def test_advanced_group(self):
client = self.client
success, msg = client.group(
'userId', 'groupId', {'trait': 'value'}, {'ip': '192.168.0.1'},
datetime(2014, 9, 3), 'anonymousId', {'Amplitude': True},
'messageId')
self.assertTrue(success)
self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
self.assertEqual(msg['integrations'], {'Amplitude': True})
self.assertEqual(msg['context']['ip'], '192.168.0.1')
self.assertEqual(msg['traits'], {'trait': 'value'})
self.assertEqual(msg['anonymousId'], 'anonymousId')
self.assertEqual(msg['context']['library'], {
'name': 'analytics-python',
'version': VERSION
})
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertEqual(msg['messageId'], 'messageId')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'group')
def test_basic_alias(self):
client = self.client
success, msg = client.alias('previousId', 'userId')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['previousId'], 'previousId')
self.assertEqual(msg['userId'], 'userId')
def test_basic_page(self):
client = self.client
success, msg = client.page('userId', name='name')
self.assertFalse(self.failed)
client.flush()
self.assertTrue(success)
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'page')
self.assertEqual(msg['name'], 'name')
def test_advanced_page(self):
client = self.client
success, msg = client.page(
'userId', 'category', 'name', {'property': 'value'},
{'ip': '192.168.0.1'}, datetime(2014, 9, 3), 'anonymousId',
{'Amplitude': True}, 'messageId')
self.assertTrue(success)
self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
self.assertEqual(msg['integrations'], {'Amplitude': True})
self.assertEqual(msg['context']['ip'], '192.168.0.1')
self.assertEqual(msg['properties'], {'property': 'value'})
self.assertEqual(msg['anonymousId'], 'anonymousId')
self.assertEqual(msg['context']['library'], {
'name': 'analytics-python',
'version': VERSION
})
self.assertEqual(msg['category'], 'category')
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertEqual(msg['messageId'], 'messageId')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'page')
self.assertEqual(msg['name'], 'name')
def test_basic_screen(self):
client = self.client
success, msg = client.screen('userId', name='name')
client.flush()
self.assertTrue(success)
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'screen')
self.assertEqual(msg['name'], 'name')
def test_advanced_screen(self):
client = self.client
success, msg = client.screen(
'userId', 'category', 'name', {'property': 'value'},
{'ip': '192.168.0.1'}, datetime(2014, 9, 3), 'anonymousId',
{'Amplitude': True}, 'messageId')
self.assertTrue(success)
self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
self.assertEqual(msg['integrations'], {'Amplitude': True})
self.assertEqual(msg['context']['ip'], '192.168.0.1')
self.assertEqual(msg['properties'], {'property': 'value'})
self.assertEqual(msg['anonymousId'], 'anonymousId')
self.assertEqual(msg['context']['library'], {
'name': 'analytics-python',
'version': VERSION
})
self.assertTrue(isinstance(msg['timestamp'], str))
self.assertEqual(msg['messageId'], 'messageId')
self.assertEqual(msg['category'], 'category')
self.assertEqual(msg['userId'], 'userId')
self.assertEqual(msg['type'], 'screen')
self.assertEqual(msg['name'], 'name')
def test_flush(self):
client = self.client
# set up the consumer with more requests than a single batch will allow
for _ in range(1000):
_, _ = client.identify('userId', {'trait': 'value'})
# We can't reliably assert that the queue is non-empty here; that's
# a race condition. We do our best to load it up though.
client.flush()
# Make sure that the client queue is empty after flushing
self.assertTrue(client.queue.empty())
def test_shutdown(self):
client = self.client
# set up the consumer with more requests than a single batch will allow
for _ in range(1000):
_, _ = client.identify('userId', {'trait': 'value'})
client.shutdown()
# we expect two things after shutdown:
# 1. client queue is empty
# 2. consumer thread has stopped
self.assertTrue(client.queue.empty())
for consumer in client.consumers:
self.assertFalse(consumer.is_alive())
def test_synchronous(self):
client = Client('testsecret', sync_mode=True)
success, _ = client.identify('userId')
self.assertFalse(client.consumers)
self.assertTrue(client.queue.empty())
self.assertTrue(success)
def test_overflow(self):
client = Client('testsecret', max_queue_size=1)
# Ensure consumer thread is no longer uploading
client.join()
for _ in range(10):
client.identify('userId')
success, _ = client.identify('userId')
# Make sure we are informed that the queue is at capacity
self.assertFalse(success)
def test_success_on_invalid_write_key(self):
client = Client('bad_key', on_error=self.fail)
client.track('userId', 'event')
client.flush()
self.assertFalse(self.failed)
def test_unicode(self):
Client(six.u('unicode_key'))
def test_numeric_user_id(self):
self.client.track(1234, 'python event')
self.client.flush()
self.assertFalse(self.failed)
def test_debug(self):
Client('bad_key', debug=True)
def test_identify_with_date_object(self):
client = self.client
success, msg = client.identify(
'userId',
{
'birthdate': date(1981, 2, 2),
},
)
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['traits'], {'birthdate': date(1981, 2, 2)})
def test_gzip(self):
client = Client('testsecret', on_error=self.fail, gzip=True)
for _ in range(10):
client.identify('userId', {'trait': 'value'})
client.flush()
self.assertFalse(self.failed)
def test_user_defined_flush_at(self):
client = Client('testsecret', on_error=self.fail,
flush_at=10, flush_interval=3)
def mock_post_fn(*args, **kwargs):
self.assertEqual(len(kwargs['batch']), 10)
# the post function should be called 2 times, with a batch size of 10
# each time.
with mock.patch('analytics.consumer.post', side_effect=mock_post_fn) \
as mock_post:
for _ in range(20):
client.identify('userId', {'trait': 'value'})
time.sleep(1)
self.assertEqual(mock_post.call_count, 2)
def test_user_defined_timeout(self):
client = Client('testsecret', timeout=10)
for consumer in client.consumers:
self.assertEqual(consumer.timeout, 10)
def test_default_timeout_15(self):
client = Client('testsecret')
for consumer in client.consumers:
self.assertEqual(consumer.timeout, 15)
def test_proxies(self):
client = Client('testsecret', proxies='203.243.63.16:80')
success, msg = client.identify('userId', {'trait': 'value'})
self.assertTrue(success)
| 37.537143 | 79 | 0.602603 |
8047fd8a27e75283f014ce87c529ead34bea762a | 50,522 | py | Python | stella/bytecode.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | ["Apache-2.0"] | 11 | 2015-08-03T17:37:46.000Z | 2021-05-26T07:29:36.000Z | stella/bytecode.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | ["Apache-2.0"] | 1 | 2016-09-17T01:46:13.000Z | 2016-09-17T01:46:13.000Z | stella/bytecode.py | squisher/stella | d9f0b2ebbd853b31c6f75cd0f0286037da4bcaf9 | ["Apache-2.0"] | 3 | 2016-05-21T19:17:16.000Z | 2019-05-10T17:35:37.000Z |
# Copyright 2013-2015 David Mohr
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dis
import sys
import types
from abc import abstractproperty
import logging
from . import tp
from . import exc
from . import utils
from . import ir
from .storage import Register, StackLoc, GlobalVariable
from .tp import Cast, Const
from .intrinsics import Intrinsic
from copy import copy
def pop_stack(n):
"""
Decorator, it takes n items off the stack
and adds them as bytecode arguments.
"""
def extract_n(f):
def extract_from_stack(self, func, stack):
args = []
for i in range(n):
args.append(stack.pop())
args.reverse()
self.stack_bc = args
return f(self, func, stack)
return extract_from_stack
return extract_n
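# Illustrative sketch of how the decorator is used (hypothetical bytecode name;
# the real uses appear further down in this module): a stack_eval wrapped with
# @pop_stack(2) receives the two popped operands in self.stack_bc and only has
# to push its own result.
#
#     class EXAMPLE_BINARY(Bytecode):
#         @pop_stack(2)
#         def stack_eval(self, func, stack):
#             # self.stack_bc == [second_from_top, top_of_stack]
#             stack.push(self)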
class Poison(object):
"""
Require that this bytecode is rewritten by bailing out
if it is ever evaluated.
Note that if the child overrides all methods, this mixin will be useless
and should be removed from the child.
"""
def stack_eval(self, func, stack):
raise exc.UnimplementedError(
"{0} must be rewritten".format(
self.__class__.__name__))
def translate(self, cge):
raise exc.UnimplementedError(
"{0} must be rewritten".format(
self.__class__.__name__))
def type_eval(self, func):
raise exc.UnimplementedError(
"{0} must be rewritten".format(
self.__class__.__name__))
class Bytecode(ir.IR):
"""
Parent class for all Python bytecodes
"""
pass
class ResultOnlyBytecode(Poison, ir.IR):
"""Only use this to inject values on the stack which did not originate from
any real bytecode. This will only work at the beginning of a program
because otherwise the bytecode may be used as the origin of a branch.
"""
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
class LOAD_FAST(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.source = None
def addLocalName(self, func, name):
# TODO: crude?
try:
self.source = func.getRegister(name)
except exc.UndefinedError:
self.source = func.getStackLoc(name)
def addArg(self, arg):
assert self.source is None
self.source = arg
@property
def _str_args(self):
return str(self.source)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
arg_type = self.source.type
if self.result is None:
type_ = type(self.source)
if type_ == StackLoc:
self.result = Register(func.impl)
elif type_ == Register:
self.result = self.source
else:
raise exc.StellaException(
"Invalid LOAD_FAST argument type `{0}'".format(type_))
if type(self.source) == StackLoc:
if arg_type.isReference():
arg_type = arg_type.dereference()
self.result.unify_type(arg_type, self.debuginfo)
def translate(self, cge):
type_ = type(self.source)
if type_ == StackLoc:
self.result.llvm = cge.builder.load(self.source.translate(cge))
elif type_ == Register:
# nothing to load, it's a pseudo instruction in this case
pass
class STORE_FAST(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.new_allocate = False
self.needs_cast = False
def addLocalName(self, func, name):
# Python does not allocate new names, it just refers to them
(self.result, self.new_allocate) = func.getOrNewStackLoc(name)
def addArg(self, arg):
assert self.result is None
self.result = arg
@pop_stack(1)
def stack_eval(self, func, stack):
pass
def type_eval(self, func):
self.grab_stack()
# func.retype(self.result.unify_type(self.args[1].type, self.debuginfo))
arg = self.args[0]
if arg.type.complex_on_stack or arg.type.on_heap:
type_ = tp.Reference(arg.type)
else:
type_ = arg.type
widened, needs_cast = self.result.unify_type(type_, self.debuginfo)
if widened:
# TODO: can I avoid a retype in some cases?
func.retype()
if needs_cast or self.needs_cast:
self.needs_cast = True
self.args[0] = Cast(arg, self.result.type)
def translate(self, cge):
self.cast(cge)
arg = self.args[0]
if self.new_allocate:
type_ = self.result.type
if type_.on_heap:
type_ = type_.dereference()
llvm_type = type_.llvmType(cge.module)
self.result.llvm = cge.builder.alloca(llvm_type, name=self.result.name)
cge.builder.store(arg.translate(cge), self.result.translate(cge))
class STORE_GLOBAL(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addName(self, func, name):
# Python does not allocate new names, it just refers to them
try:
self.result = func.loadGlobal(name)
except exc.UndefinedError:
self.result = func.newGlobal(name)
@pop_stack(1)
def stack_eval(self, func, stack):
pass
def type_eval(self, func):
self.grab_stack()
# func.retype(self.result.unify_type(self.args[1].type, self.debuginfo))
arg = self.args[0]
if self.result.initial_value is None:
# This means we're defining a new variable
self.result.setInitialValue(arg)
widened, needs_cast = self.result.unify_type(arg.type, self.debuginfo)
if widened:
# TODO: can I avoid a retype in some cases?
func.retype()
if needs_cast:
# TODO: side effect! Maybe that's for the best.
self.args[0] = Cast(arg, self.result.type)
def translate(self, cge):
# Assume that the global has been allocated already.
self.cast(cge)
cge.builder.store(self.args[0].translate(cge), self.result.translate(cge))
class LOAD_CONST(Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addArg(self, arg):
assert self.const_arg is None
self.const_arg = arg
def stack_eval(self, func, stack):
self.result = self.const_arg
stack.push(self)
def type_eval(self, func):
pass
def translate(self, cge):
pass
class BinaryOp(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
self.needs_cast = [False, False]
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
for i in range(len(self.args)):
arg = self.args[i]
if arg.type == self.result.type:
# a cast may have been necessary in the previous iteration,
# but now the argument may have changed type, so check before
# continuing
self.needs_cast[i] = False
if self.needs_cast[i]:
# install the cast before unify_type() because otherwise we're
# in an infinite loop retyping the function
self.args[i] = Cast(arg, self.result.type)
widened, needs_cast = self.result.unify_type(arg.type, self.debuginfo)
if widened:
# TODO: can I avoid a retype in some cases?
# It could definitely be smarter and retype the other parameter
# directly if need be.
func.retype()
if needs_cast:
self.needs_cast[i] = True
# install the cast here because we may not get re-typed
self.args[i] = Cast(arg, self.result.type)
def builderFuncName(self):
try:
return self.b_func[self.result.type]
except KeyError:
raise exc.TypeError(
"{0} does not yet implement type {1}".format(
self.__class__.__name__,
self.result.type))
def translate(self, cge):
self.cast(cge)
f = getattr(cge.builder, self.builderFuncName())
self.result.llvm = f(
self.args[0].translate(cge),
self.args[1].translate(cge))
@abstractproperty
def b_func(self):
return {}
class BINARY_ADD(BinaryOp):
b_func = {tp.Float: 'fadd', tp.Int: 'add'}
class BINARY_SUBTRACT(BinaryOp):
b_func = {tp.Float: 'fsub', tp.Int: 'sub'}
class BINARY_MULTIPLY(BinaryOp):
b_func = {tp.Float: 'fmul', tp.Int: 'mul'}
class BINARY_MODULO(BinaryOp):
b_func = {tp.Float: 'frem', tp.Int: 'srem'}
class BINARY_POWER(BinaryOp):
b_func = {tp.Float: 'llvm.pow', tp.Int: 'llvm.powi'}
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
# TODO if args[1] is int but negative, then the result will be float, too!
super().type_eval(func)
def translate(self, cge):
# llvm.pow[i]'s first argument always has to be float
arg = self.args[0]
if arg.type == tp.Int:
self.args[0] = Cast(arg, tp.Float)
self.cast(cge)
if self.args[1].type == tp.Int:
# powi takes a i32 argument
power = cge.builder.trunc(
self.args[1].translate(cge),
tp.tp_int32,
'(i32)' +
self.args[1].name)
else:
power = self.args[1].translate(cge)
llvm_pow = cge.module.llvm.declare_intrinsic(self.b_func[self.args[1].type],
[self.args[0].llvmType(cge.module)])
pow_result = cge.builder.call(llvm_pow, [self.args[0].translate(cge), power])
if isinstance(self.args[0], Cast) and \
self.args[0].obj.type == tp.Int and self.args[1].type == tp.Int:
# cast back to an integer
self.result.llvm = cge.builder.fptosi(pow_result, tp.Int.llvmType(cge.module))
else:
self.result.llvm = pow_result
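# Worked example for BINARY_POWER above (hypothetical values): for 2 ** 10 with
# two int operands, the base is cast to float, llvm.powi(2.0, 10) yields
# 1024.0, and the result is converted back with fptosi to the integer 1024;
# if either operand is a float, the floating point result is kept as is.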
class BINARY_FLOOR_DIVIDE(BinaryOp):
"""Python compliant `//' operator.
Slow since it has to perform type conversions and floating point division for integers"""
b_func = {
tp.Float: 'fdiv',
tp.Int: 'fdiv'} # NOT USED, but required to make it a concrete class
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def type_eval(self, func):
self.grab_stack()
for arg in self.args:
widened, _ = self.result.unify_type(arg.type, self.debuginfo)
func.retype(widened)
def translate(self, cge):
is_int = all([arg.type == tp.Int for arg in self.args])
for i in range(len(self.args)):
if self.args[i].type != tp.Float:
self.args[i] = Cast(self.args[i], tp.Float)
self.cast(cge)
tmp = cge.builder.fdiv(
self.args[0].translate(cge),
self.args[1].translate(cge))
llvm_floor = cge.module.llvm.declare_intrinsic('llvm.floor',
[tp.Float.llvmType(cge.module)])
self.result.llvm = cge.builder.call(llvm_floor, [tmp])
if is_int:
            # TODO this may be superfluous if both args got converted to float
# in the translation stage -> move toFloat partially to the
# analysis stage.
self.result.llvm = cge.builder.fptosi(
self.result.translate(cge),
tp.Int.llvmType(cge.module),
"(int)" +
self.result.name)
class BINARY_TRUE_DIVIDE(BinaryOp):
b_func = {tp.Float: 'fdiv'}
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
# The result of `/', true division, is always a float
self.result.type = tp.Float
super().type_eval(func)
class INPLACE_ADD(BINARY_ADD):
pass
class INPLACE_SUBTRACT(BINARY_SUBTRACT):
pass
class INPLACE_MULTIPLY(BINARY_MULTIPLY):
pass
class INPLACE_TRUE_DIVIDE(BINARY_TRUE_DIVIDE):
pass
class INPLACE_FLOOR_DIVIDE(BINARY_FLOOR_DIVIDE):
pass
class INPLACE_MODULO(BINARY_MODULO):
pass
class COMPARE_OP(Bytecode):
b_func = {tp.Float: 'fcmp_ordered', tp.Int: 'icmp_signed', tp.Bool: 'icmp_signed'}
op = None
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
def addCmp(self, op):
self.op = op
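        # op is a textual comparison operator such as '>=' or '=='; it is
        # passed straight through to the llvmlite fcmp_ordered/icmp_signed
        # builder call in translate() below.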
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
self.result.type = tp.Bool
# upcast integers to float if required
if (self.args[0].type == tp.Int and self.args[1].type == tp.Float):
self.args[0] = Cast(self.args[0], tp.Float)
if (self.args[0].type == tp.Float and self.args[1].type == tp.Int):
self.args[1] = Cast(self.args[1], tp.Float)
if (self.args[0].type != self.args[1].type and
self.args[0].type != tp.NoType and self.args[1].type != tp.NoType):
raise exc.TypeError(
"Comparing different types ({0} with {1})".format(
self.args[0].type,
self.args[1].type))
def translate(self, cge):
        # assume both types are the same; this is enforced in type_eval
type_ = self.args[0].type
        if type_ not in self.b_func:
raise exc.UnimplementedError(type_)
f = getattr(cge.builder, self.b_func[type_])
llvm = f(self.op,
self.args[0].translate(cge),
self.args[1].translate(cge))
# the comparison returns i1 but we need to return an i8
self.result.llvm = cge.builder.zext(llvm, tp.Bool.llvmType(cge))
class RETURN_VALUE(utils.BlockTerminal, Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.func = func
@pop_stack(1)
def stack_eval(self, func, stack):
pass
def type_eval(self, func):
self.grab_stack()
self.result = self.args[0]
for arg in self.args:
func.retype(self.result.unify_type(arg.type, self.debuginfo))
def translate(self, cge):
if self.result.type is tp.Void:
if self.func.result.type is tp.Void:
cge.builder.ret_void()
else:
cge.builder.ret(self.func.result.type.null(cge.module))
else:
cge.builder.ret(self.result.translate(cge))
class HasTarget(object):
target_label = None
target_bc = None
def setTargetBytecode(self, bc):
self.target_bc = bc
def updateTargetBytecode(self, old_bc, new_bc):
self.setTargetBytecode(new_bc)
def setTarget(self, label):
self.target_label = label
def __str__(self):
return "{0} {1} {2}".format(
self.__class__.__name__,
self.target_label,
", ".join([str(v) for v in self.args]))
class Jump(utils.BlockTerminal, HasTarget, ir.IR):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def processFallThrough(self):
return False
def stack_eval(self, func, stack):
assert self.target_bc is not None
return [(self.target_bc, stack)]
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
cge.builder.branch(self.target_bc.block)
def equivalent(self, other):
"""Equality but location independent.
"""
return type(self) == type(other) and self.target_bc == other.target_bc
class Jump_if_X_or_pop(Jump):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def processFallThrough(self):
self.fallthrough = self.next
return True
def updateTargetBytecode(self, old_bc, new_bc):
if old_bc == self.target_bc:
self.setTargetBytecode(new_bc)
else:
assert self.fallthrough == old_bc
self.fallthrough = new_bc
@pop_stack(1)
def stack_eval(self, func, stack):
stack2 = stack.clone()
r = []
# if X, push back onto stack and jump:
stack.push(self.stack_bc[0])
r.append((self.target_bc, stack))
        # else continue with the next instruction (the value stays popped)
r.append((self.next, stack2))
return r
class JUMP_IF_FALSE_OR_POP(Jump_if_X_or_pop, Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def translate(self, cge):
cond = tp.Cast.translate_i1(self.args[0], cge)
cge.builder.cbranch(cond,
self.next.block,
self.target_bc.block)
class JUMP_IF_TRUE_OR_POP(Jump_if_X_or_pop, Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def translate(self, cge):
cond = tp.Cast.translate_i1(self.args[0], cge)
cge.builder.cbranch(cond,
self.target_bc.block,
self.next.block)
class Pop_jump_if_X(Jump):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.additional_pops = 0
def processFallThrough(self):
self.fallthrough = self.next
return True
def updateTargetBytecode(self, old_bc, new_bc):
if old_bc == self.target_bc:
self.setTargetBytecode(new_bc)
else:
assert self.fallthrough == old_bc
self.fallthrough = new_bc
def additionalPop(self, i):
"""Deviate from Python semantics: pop i more items off the stack WHEN jumping.
Instead of the Python semantics to pop one value of the stack, pop i more when jumping.
"""
self.additional_pops = i
@pop_stack(1)
def stack_eval(self, func, stack):
r = []
# if X, jump
jump_stack = stack.clone()
for i in range(self.additional_pops):
jump_stack.pop()
r.append((self.target_bc, jump_stack))
# else continue to the next instruction
r.append((self.next, stack))
# (pop happens in any case)
return r
class POP_JUMP_IF_FALSE(Pop_jump_if_X, Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def translate(self, cge):
cond = tp.Cast.translate_i1(self.args[0], cge)
cge.builder.cbranch(cond,
self.next.block,
self.target_bc.block)
class POP_JUMP_IF_TRUE(Pop_jump_if_X, Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def translate(self, cge):
cond = tp.Cast.translate_i1(self.args[0], cge)
cge.builder.cbranch(cond,
self.target_bc.block,
self.next.block)
class SETUP_LOOP(utils.BlockStart, HasTarget, Bytecode):
"""
    Will either be rewritten (for loop) or have no effect other than marking
    the start of a block (while loop).
"""
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def stack_eval(self, func, stack):
pass
def translate(self, cge):
pass
def type_eval(self, func):
pass
class POP_BLOCK(utils.BlockEnd, Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def stack_eval(self, func, stack):
pass
def translate(self, cge):
pass
def type_eval(self, func):
pass
class LOAD_GLOBAL(Bytecode):
var = None
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addName(self, func, name):
self.args.append(name)
def stack_eval(self, func, stack):
stack.push(self)
def translate(self, cge):
if isinstance(self.var, ir.FunctionRef):
pass
elif isinstance(self.var, GlobalVariable):
self.result.llvm = cge.builder.load(self.var.translate(cge))
def type_eval(self, func):
self.grab_stack()
if self.result is None:
self.var = func.impl.loadGlobal(self.args[0])
# TODO: remove these isinstance checks and just check for
# GlobalVariable else return directly?
if isinstance(self.var, ir.FunctionRef):
self.result = self.var
elif isinstance(self.var, types.ModuleType):
self.result = self.var
elif isinstance(self.var, type):
self.result = tp.PyWrapper(self.var)
elif isinstance(self.var, Intrinsic):
self.result = self.var
elif isinstance(self.var, GlobalVariable):
self.result = Register(func.impl)
else:
raise exc.UnimplementedError(
"Unknown global type {0}".format(
type(self.var)))
if isinstance(self.var, GlobalVariable):
self.result.unify_type(self.var.type, self.debuginfo)
class LOAD_ATTR(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addName(self, func, name):
self.name = name
@property
def _str_args(self):
if len(self.args) > 0:
obj = self.args[0]
else:
obj = '?'
return "{}.{}".format(obj, self.name)
@pop_stack(1)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
arg = self.args[0]
# TODO would it be better to move some of this into arg.type?
if isinstance(arg, types.ModuleType):
self.result = func.module.loadExt(arg, self.name)
self.discard = True
return
type_ = arg.type.dereference()
if isinstance(type_, tp.StructType):
try:
attr_type = arg.type.dereference().getMemberType(self.name)
except KeyError:
raise exc.AttributeError("Unknown field {} of type {}".format(self.name,
arg.type),
self.debuginfo)
if isinstance(attr_type, tp.FunctionType):
self.result = func.module.getFunctionRef(attr_type)
else:
if self.result is None:
self.result = Register(func.impl)
self.result.unify_type(attr_type, self.debuginfo)
elif isinstance(type_, tp.ArrayType):
if self.result is None:
self.result = Register(func.impl)
self.result.unify_type(tp.get(type_.shape), self.debuginfo)
else:
raise exc.TypeError("Cannot load attribute {} from type {}".format(self.name,
arg.type),
self.debuginfo)
def translate(self, cge):
arg = self.args[0]
if isinstance(arg, types.ModuleType):
return
type_ = arg.type.dereference()
if isinstance(type_, tp.StructType):
tp_attr = type_.getMemberType(self.name)
if isinstance(tp_attr, tp.FunctionType):
self.result.f_self = arg
return
idx = type_.getMemberIdx(self.name)
idx_llvm = tp.getIndex(idx)
struct_llvm = arg.translate(cge)
p = cge.builder.gep(struct_llvm, [tp.Int.constant(0), idx_llvm], inbounds=True)
self.result.llvm = cge.builder.load(p)
elif isinstance(type_, tp.ArrayType):
val = tp.wrapValue(type_.shape)
self.result.llvm = val.translate(cge)
else:
raise exc.UnimplementedError(type(arg))
class STORE_ATTR(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
# TODO: Does the result have to be a register? Don't I only need it for
# the llvm propagation?
self.result = Register(func)
def addName(self, func, name):
self.name = name
@pop_stack(2)
def stack_eval(self, func, stack):
pass
def type_eval(self, func):
self.grab_stack()
type_ = self.args[1].type.dereference()
if isinstance(type_, tp.StructType):
member_type = type_.getMemberType(self.name)
arg_type = self.args[0].type
if member_type != arg_type:
if member_type == tp.Float and arg_type == tp.Int:
self.args[0] = tp.Cast(self.args[0], tp.Float)
return
# TODO would it speed up the algorithm if arg_type is set to be
# member_type here?
if arg_type == tp.NoType:
# will be retyped anyway
return
raise exc.TypeError("Argument type {} incompatible with member type {}".format(
arg_type, member_type))
else:
raise exc.UnimplementedError(
"Cannot store attribute {0} of an object with type {1}".format(
self.name,
type(self.args[1])))
def translate(self, cge):
if (isinstance(self.args[1], tp.Typable)
and isinstance(self.args[1].type.dereference(), tp.StructType)):
struct_llvm = self.args[1].translate(cge)
idx = self.args[1].type.dereference().getMemberIdx(self.name)
idx_llvm = tp.getIndex(idx)
val_llvm = self.args[0].translate(cge)
p = cge.builder.gep(struct_llvm, [tp.Int.constant(0), idx_llvm], inbounds=True)
self.result.llvm = cge.builder.store(val_llvm, p)
else:
raise exc.UnimplementedError(type(self.args[1]))
class CALL_FUNCTION(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addRawArg(self, arg):
self.num_pos_args = arg & 0xFF
self.num_kw_args = (arg >> 8) & 0xFF
self.num_stack_args = self.num_pos_args + self.num_kw_args*2
@property
def _str_args(self):
return str(self.func)
def separateArgs(self):
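        # Stack layout at this point (bottom to top): callable, positional
        # arguments, then one (key, value) pair per keyword argument with the
        # value on top -- so keyword pairs are peeled off the end first.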
self.func = self.args[0]
args = self.args[1:]
# pdb.set_trace()
assert len(args) == self.num_stack_args
self.kw_args = {}
for i in range(self.num_kw_args):
# the list is reversed, so the value comes first
value = args.pop()
key = args.pop()
# key is a Const object, unwrap it
self.kw_args[key.value] = value
# remainder is positional
self.args = args
def stack_eval(self, func, stack):
self.stack_bc = []
for i in range(self.num_pos_args + 2*self.num_kw_args + 1):
arg = stack.pop()
self.stack_bc.append(arg)
self.stack_bc.reverse()
stack.push(self)
def type_eval(self, func):
self.grab_stack()
self.separateArgs()
if not isinstance(self.func, (ir.FunctionRef, Intrinsic, ir.ExtFunctionRef)):
# we don't officially know yet that what we're calling is a
# function, so install a dummy result and redo the analysis later
func.impl.analyzeAgain()
self.result = Register(func.impl)
return
if self.result is None or self.result.type == tp.NoType:
self.result = self.func.getResult(func.impl)
if not isinstance(self.func, Intrinsic):
func.module.functionCall(self.func, self.args, self.kw_args)
type_ = self.func.getReturnType(self.args, self.kw_args)
tp_change = self.result.unify_type(type_, self.debuginfo)
if self.result.type == tp.NoType:
# abort here because mostly everything downstream will be unknown types
return True
else:
func.retype(tp_change)
def translate(self, cge):
self.result.llvm = self.func.call(
cge,
self.args,
self.kw_args)
class GET_ITER(Poison, Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
class FOR_ITER(Poison, HasTarget, Bytecode):
"""WIP"""
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
class JUMP_ABSOLUTE(Jump, Bytecode):
"""WIP"""
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
class JUMP_FORWARD(Jump, Bytecode):
"""WIP"""
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
class ForLoop(HasTarget, ir.IR):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.iterable = None
def setLoopVar(self, loop_var):
self.loop_var = loop_var
def setLimit(self, limit):
self.limit = limit
def setStart(self, start):
self.start = start
def setEndLoc(self, end_loc):
self.target_label = end_loc
def setTestLoc(self, loc):
self.test_loc = loc
def setIterLoc(self, loc):
"""The location of FOR_ITER which may be referenced as 'restart loop'"""
self.iter_loc = loc
def setIterable(self, iterable):
"""The LOAD of X which we are iterating over: for _ in X:"""
self.iterable = iterable
def basicSetup(self, bc):
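        # Walks backwards from the FOR_ITER bytecode and dismantles the
        # (roughly) expected CPython pattern:
        #   SETUP_LOOP; [LOAD_GLOBAL range; [start]; limit; CALL_FUNCTION]
        #   or a plain iterable load; GET_ITER; FOR_ITER; STORE_FAST loop_var
        # recording start, limit, iterable and the loop variable for rewrite().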
iter_loc = bc.loc
start = None
iterable = None
cur = bc.prev
if not isinstance(cur, GET_ITER):
raise exc.UnimplementedError('unsupported for loop')
cur.remove()
cur = bc.prev
if isinstance(cur, (LOAD_ATTR, LOAD_GLOBAL, LOAD_FAST)):
iterable = cur
limit = Const(0)
cur.remove()
cur = bc.prev
if isinstance(iterable, LOAD_ATTR):
# LOAD_ATTR requires the object to load, and iterable.prev
# still refers to it
cur.remove()
# iterable should point to the first instruction required
cur.next = iterable
iterable = cur
cur = bc.prev
else:
if not isinstance(cur, CALL_FUNCTION):
raise exc.UnimplementedError('unsupported for loop')
cur.remove()
cur = bc.prev
# TODO: this if..elif should be more general!
if isinstance(cur, LOAD_FAST):
limit = cur.source
cur.remove()
elif isinstance(cur, LOAD_CONST):
limit = cur.const_arg
cur.remove()
elif isinstance(cur, CALL_FUNCTION):
cur.remove()
limit = [cur]
num_args = cur.num_stack_args+1 # +1 for the function name
i = 0
while i < num_args:
cur = cur.prev
# TODO: HACK. How to make this general and avoid duplicating
# stack_eval() knowledge?
if isinstance(cur, LOAD_ATTR):
# LOAD_ATTR has an argument; num_args is stack values NOT
# the number of bytecodes which i is counting
num_args += 1
cur.remove()
limit.append(cur)
i += 1
elif isinstance(cur, LOAD_ATTR):
limit = [cur, cur.prev]
cur.prev.remove()
cur.remove()
else:
raise exc.UnimplementedError(
'unsupported for loop: limit {0}'.format(
type(cur)))
cur = bc.prev
# this supports a start argument to range
if isinstance(cur, LOAD_FAST) or isinstance(cur, LOAD_CONST):
start = cur
cur.remove()
cur = bc.prev
if not isinstance(cur, SETUP_LOOP):
if not isinstance(cur, LOAD_GLOBAL):
raise exc.UnimplementedError('unsupported for loop')
cur.remove()
cur = bc.prev
if not isinstance(cur, SETUP_LOOP):
raise exc.UnimplementedError('unsupported for loop')
end_loc = cur.target_label
self.loc = cur.loc
# TODO set location for self and transfer jumps!
self.setIterable(iterable)
self.setStart(start)
self.setLimit(limit)
self.setEndLoc(end_loc)
self.setTestLoc(bc.loc)
self.setIterLoc(iter_loc)
cur.insert_after(self)
cur.remove()
cur = bc.next
if not isinstance(cur, STORE_FAST):
raise exc.UnimplementedError('unsupported for loop')
loop_var = cur.result
self.setLoopVar(loop_var)
cur.remove()
bc.remove()
def rewrite(self, func):
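        # Expands the placeholder into explicit bytecodes: initialise the index
        # variable, emit an initial bounds check, keep the original body, then
        # append the loop test and the increment, so the for loop ends up as an
        # explicit while-style counting loop.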
def load_loop_value(last, after=True):
if isinstance(self.iterable.next, LOAD_ATTR):
b = copy(self.iterable)
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
b = copy(self.iterable.next)
if after:
self.iterable_attr = b
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
else:
b = copy(self.iterable)
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
b = BINARY_SUBSCR(func.impl, self.debuginfo)
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
b = STORE_FAST(func.impl, self.debuginfo)
b.new_allocate = True
b.addArg(self.loop_value)
if after:
last.insert_after(b)
last = b
else:
last.insert_before(b)
return last
last = self
(self.limit_minus_one, _) = func.impl.getOrNewStackLoc(
str(self.test_loc) + "__limit")
if self.iterable:
self.loop_value = self.loop_var
(self.loop_var, _) = func.impl.getOrNewStackLoc(
self.loop_value.name + "__idx")
# init
if self.start:
b = self.start
else:
b = LOAD_CONST(func.impl, self.debuginfo)
b.addArg(Const(0))
last.insert_after(b)
last = b
b = STORE_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
b.new_allocate = True
last.insert_after(b)
last = b
# initial test
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
b.loc = self.test_loc
func.replaceLocation(b)
last.insert_after(b)
last = b
if isinstance(self.limit, (StackLoc, Register)):
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.limit)
last.insert_after(b)
last = b
elif isinstance(self.limit, Const):
b = LOAD_CONST(func.impl, self.debuginfo)
b.addArg(self.limit)
last.insert_after(b)
last = b
elif isinstance(self.limit, list):
# limit is return value of a function call
for b in reversed(self.limit):
last.insert_after(b)
last = b
b = DUP_TOP(func.impl, self.debuginfo)
last.insert_after(b)
last = b
b = ROT_THREE(func.impl, self.debuginfo)
last.insert_after(b)
last = b
else:
raise exc.UnimplementedError(
"Unsupported limit type {0}".format(
type(
self.limit)))
b = COMPARE_OP(func.impl, self.debuginfo)
b.addCmp('>=')
last.insert_after(b)
last = b
b = POP_JUMP_IF_TRUE(func.impl, self.debuginfo)
b.setTarget(self.target_label)
if isinstance(self.limit, list):
b.additionalPop(1)
last.insert_after(b)
last = b
# my_limit = limit -1
if isinstance(self.limit, (StackLoc, Register)):
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.limit)
last.insert_after(b)
last = b
elif isinstance(self.limit, Const):
b = LOAD_CONST(func.impl, self.debuginfo)
b.addArg(self.limit)
last.insert_after(b)
last = b
elif isinstance(self.limit, list):
# Nothing to do, the value is already on the stack
pass
else:
raise exc.UnimplementedError(
"Unsupported limit type {0}".format(
type(
self.limit)))
b = LOAD_CONST(func.impl, self.debuginfo)
b.addArg(Const(1))
last.insert_after(b)
last = b
b = BINARY_SUBTRACT(func.impl, self.debuginfo)
last.insert_after(b)
last = b
b = STORE_FAST(func.impl, self.debuginfo)
b.addArg(self.limit_minus_one)
b.new_allocate = True
last.insert_after(b)
last = b
if self.iterable:
last = load_loop_value(last)
# $body, keep, find the end of it
body_loc = b.linearNext().loc
func.addLabel(b.linearNext())
jump_updates = []
jump_sources = {}
while b.next is not None:
if isinstance(b, Jump):
try:
jump_sources[b.target_label].append(b)
except KeyError:
jump_sources[b.target_label] = [b]
if b.target_label == self.iter_loc:
jump_updates.append(b)
b = b.next
assert isinstance(b, utils.BlockEnd)
jump_loc = b.loc
last = b.prev
b.remove()
# go back to the JUMP and switch locations
loop_test_loc = last.loc
last.loc = jump_loc
func.replaceLocation(last)
for b in jump_updates:
b.setTarget(loop_test_loc)
if last.linearPrev().equivalent(last) and isinstance(last, JUMP_ABSOLUTE):
# Python seems to sometimes add a duplicate JUMP_ABSOLUTE at the
# end of the loop. Remove it and update other jumps that refer to it.
lp = last.linearPrev()
if lp.loc in jump_sources:
for bc_ in jump_sources[lp.loc]:
bc_.target_label = last.loc
lp.remove()
# loop test
# pdb.set_trace()
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
b.loc = loop_test_loc
func.replaceLocation(b)
last.insert_before(b)
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.limit_minus_one)
last.insert_before(b)
b = COMPARE_OP(func.impl, self.debuginfo)
b.addCmp('>=')
last.insert_before(b)
b = POP_JUMP_IF_TRUE(func.impl, self.debuginfo)
b.setTarget(self.target_label)
last.insert_before(b)
# increment
b = LOAD_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
last.insert_before(b)
b = LOAD_CONST(func.impl, self.debuginfo)
b.addArg(Const(1))
last.insert_before(b)
b = INPLACE_ADD(func.impl, self.debuginfo)
last.insert_before(b)
b = STORE_FAST(func.impl, self.debuginfo)
b.addArg(self.loop_var)
last.insert_before(b)
if self.iterable:
load_loop_value(last, False)
# JUMP to COMPARE_OP is already part of the bytecodes
last.setTarget(body_loc)
def stack_eval(self, func, stack):
# self.result = func.getOrNewRegister(self.loop_var)
# stack.push(self.result)
pass
def translate(self, cge):
pass
def type_eval(self, func):
self.grab_stack()
if self.iterable:
# TODO if we have an iterable, then we must populate the limit
# here. Yet I am not sure how to detect when this was never
# successful
if isinstance(self.iterable.next, LOAD_ATTR):
if self.iterable_attr.result:
iterable = self.iterable_attr.result
else:
func.retype()
return
else:
iterable = self.iterable.source
if iterable.type != tp.NoType:
type_ = iterable.type.dereference()
self.limit.value = type_.shape
class STORE_SUBSCR(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(3)
def stack_eval(self, func, stack):
self.result = None
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
if self.args[1].type.isReference():
type_ = self.args[1].type.dereference()
else:
type_ = self.args[1].type
type_.storeSubscript(cge, self.args[1], self.args[2], self.args[0])
class BINARY_SUBSCR(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
if self.args[0].type.isReference():
arg_type = self.args[0].type.dereference()
else:
arg_type = self.args[0].type
if not isinstance(arg_type, tp.Subscriptable):
raise exc.TypeError(
"Type must be subscriptable, but got {0}".format(
self.args[0].type))
self.result.unify_type(
arg_type.getElementType(self.args[1]),
self.debuginfo)
def translate(self, cge):
if self.args[0].type.isReference():
type_ = self.args[0].type.dereference()
else:
type_ = self.args[0].type
self.result.llvm = type_.loadSubscript(cge, self.args[0], self.args[1])
class POP_TOP(Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(1)
def stack_eval(self, func, stack):
pass
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
pass
class DUP_TOP(Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(1)
def stack_eval(self, func, stack):
stack.push(self.stack_bc[0])
stack.push(self.stack_bc[0])
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
pass
class DUP_TOP_TWO(Bytecode):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self.stack_bc[0])
stack.push(self.stack_bc[1])
stack.push(self.stack_bc[0])
stack.push(self.stack_bc[1])
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
pass
class ROT_TWO(Bytecode, Poison):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(2)
def stack_eval(self, func, stack):
stack.push(self.stack_bc[1])
stack.push(self.stack_bc[0])
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
pass
class ROT_THREE(Bytecode, Poison):
discard = True
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
@pop_stack(3)
def stack_eval(self, func, stack):
stack.push(self.stack_bc[2])
stack.push(self.stack_bc[0])
stack.push(self.stack_bc[1])
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
pass
class UNARY_NEGATIVE(Bytecode):
b_func = {tp.Float: 'fsub', tp.Int: 'sub'}
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
@pop_stack(1)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
arg = self.args[0]
self.result.unify_type(arg.type, self.debuginfo)
def builderFuncName(self):
try:
return self.b_func[self.result.type]
except KeyError:
raise exc.TypeError(
"{0} does not yet implement type {1}".format(
self.__class__.__name__,
self.result.type))
def translate(self, cge):
self.cast(cge)
f = getattr(cge.builder, self.builderFuncName())
self.result.llvm = f(
self.result.type.constant(0),
self.args[0].translate(cge))
class UNPACK_SEQUENCE(Bytecode):
n = 0
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addRawArg(self, arg):
self.n = arg
@pop_stack(1)
def stack_eval(self, func, stack):
self.result = []
for i in range(self.n):
reg = Register(func)
stack.push(self)
self.result.append(reg)
def type_eval(self, func):
self.grab_stack()
i = 0
for reg in reversed(self.result):
reg.unify_type(self.args[0].type.getElementType(i), self.debuginfo)
i += 1
def translate(self, cge):
if self.args[0].type.isReference():
type_ = self.args[0].type.dereference()
else:
type_ = self.args[0].type
i = 0
for reg in reversed(self.result):
reg.llvm = type_.loadSubscript(cge, self.args[0], i)
i += 1
class BUILD_TUPLE(Bytecode):
n = 0
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addRawArg(self, arg):
self.n = arg
def stack_eval(self, func, stack):
self.stack_bc = []
for i in range(self.n):
self.stack_bc.append(stack.pop())
stack.push(self)
def type_eval(self, func):
self.grab_stack()
self.args.reverse()
if not self.result:
self.result = tp.Tuple(self.args)
else:
self.result.unify_type(tp.TupleType([arg.type for arg in self.args]),
self.debuginfo)
def translate(self, cge):
self.result.translate(cge)
class RAISE_VARARGS(Bytecode):
"""TODO will abort the program with a crash"""
n = 0
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
def addRawArg(self, arg):
self.n = arg
def stack_eval(self, func, stack):
for i in range(self.n):
stack.pop()
def type_eval(self, func):
self.grab_stack()
def translate(self, cge):
llvm_f = cge.module.llvm.declare_intrinsic('llvm.trap', [])
cge.builder.call(llvm_f, [])
class UNARY_NOT(Bytecode):
def __init__(self, func, debuginfo):
super().__init__(func, debuginfo)
self.result = Register(func)
@pop_stack(1)
def stack_eval(self, func, stack):
stack.push(self)
def type_eval(self, func):
self.grab_stack()
arg = self.args[0]
if arg.type in (tp.Int, tp.Float):
self.args[0] = Cast(arg, tp.Bool)
self.result.unify_type(tp.Bool, self.debuginfo)
def translate(self, cge):
self.cast(cge)
self.result.llvm = cge.builder.xor(
tp.Bool.constant(1),
self.args[0].translate(cge))
class BINARY_AND(BinaryOp):
b_func = {tp.Bool: 'and_', tp.Int: 'and_'}
class BINARY_OR(BinaryOp):
b_func = {tp.Bool: 'or_', tp.Int: 'or_'}
class BINARY_XOR(BinaryOp):
b_func = {tp.Bool: 'xor', tp.Int: 'xor'}
opconst = {}
# Get all concrete subclasses of Bytecode and register them
for name in dir(sys.modules[__name__]):
obj = sys.modules[__name__].__dict__[name]
try:
if issubclass(obj, Bytecode) and len(obj.__abstractmethods__) == 0:
opconst[dis.opmap[name]] = obj
except TypeError:
pass
| 29.912374 | 95 | 0.570959 |
9bb38f2434749a1319e22cbe7fe64f5127f2215a
| 387 |
py
|
Python
|
C_Tut/C_Tut/wsgi.py
|
jaydeep11/C-tutorial-web-application
|
3ec0225efda834fe93a678d887044906124de59b
|
[
"MIT"
] | null | null | null |
C_Tut/C_Tut/wsgi.py
|
jaydeep11/C-tutorial-web-application
|
3ec0225efda834fe93a678d887044906124de59b
|
[
"MIT"
] | null | null | null |
C_Tut/C_Tut/wsgi.py
|
jaydeep11/C-tutorial-web-application
|
3ec0225efda834fe93a678d887044906124de59b
|
[
"MIT"
] | null | null | null |
"""
WSGI config for C_Tut project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'C_Tut.settings')
application = get_wsgi_application()
| 22.764706 | 78 | 0.782946 |
3bb222999fa0ba43d398d4190cd0f65e97bc5944
| 414 |
py
|
Python
|
tests/test_methods/test_reminders.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | 2 |
2020-09-24T00:07:13.000Z
|
2020-09-27T19:27:06.000Z
|
tests/test_methods/test_reminders.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_methods/test_reminders.py
|
jackwardell/SlackTime
|
c40be4854a26084e1a368a975e220d613c14d8d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def test_reminders_add(slack_time):
assert slack_time.reminders.add
def test_reminders_complete(slack_time):
assert slack_time.reminders.complete
def test_reminders_delete(slack_time):
assert slack_time.reminders.delete
def test_reminders_info(slack_time):
assert slack_time.reminders.info
def test_reminders_list(slack_time):
assert slack_time.reminders.list
| 18.818182 | 40 | 0.785024 |
e437418eaad1f373f20f46455354c82526bc5573
| 2,317 |
py
|
Python
|
juriscraper/DeferringList.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 228 |
2015-01-23T04:41:39.000Z
|
2022-03-30T09:52:20.000Z
|
juriscraper/DeferringList.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 331 |
2015-01-05T18:53:40.000Z
|
2022-03-29T23:43:30.000Z
|
juriscraper/DeferringList.py
|
EvandoBlanco/juriscraper
|
3d16af258620d4ba1b4827f66ef69e8a2c5a0484
|
[
"BSD-2-Clause"
] | 84 |
2015-01-03T01:19:21.000Z
|
2022-03-01T08:09:32.000Z
|
from juriscraper.AbstractSite import logger
class DeferringList(object):
"""This object can be used to do deferred loading of meta data in the case
that a piece of meta data requires some special work to obtain.
Note that since this inherits from object (rather than list), it won't be
sorted by the _date_sort function. As a result, it's vital that the code
using this object provide the seed data in sorted order. Failure to do so
will result in mixed up data being sent to the caller -- a bad fate.
For an example of how this can be used, see
juriscraper.opinions.united_states.state.tex
"""
def __init__(self, *args, **kwargs):
logger.warning(
"Using DeferringList object which cannot be sorted until "
"fetched. Note that in usual processing, the fetching "
"happens before the sorting, so this is OK."
)
logger.info(
"DeferringList has %s entries to fetch." % len(kwargs["seed"])
)
self._data = kwargs["seed"]
self._fetched_items = [False] * len(kwargs["seed"])
self._fetching_function = kwargs["fetcher"]
def __iter__(self):
for item in range(0, len(self._data)):
if self._fetched_items[item]:
yield self._data[item]
else:
yield self.__getitem__(item)
def __getitem__(self, item):
if self._fetched_items[item]:
return self._data[item]
else:
# Go get the item using the fetching function
logger.info(
"Getting deferred value from seed: %s" % self._data[item]
)
new_val = self._fetching_function(self._data[item])
self._data[item] = new_val
self._fetched_items[item] = True
return new_val
def __setitem__(self, key, value):
if self._fetched_items[key]:
self._data[key] = value
else:
raise AttributeError(
"Cannot set item that has not yet been fetched."
)
def __delitem__(self, item):
del self._data[item]
del self._fetched_items[item]
def __len__(self):
return len(self._data)
def __str__(self):
return "<DeferringList %s>" % self.__dict__
| 34.58209 | 78 | 0.608977 |
7498aa48a6076fa6ddd43850db6855041150a7cd
| 1,409 |
py
|
Python
|
main_precalculate_M_arrayjob.py
|
aalto-ics-kepaco/pairwiseMKL
|
99ebfa880ad57356bf4086f20e38c481672ac115
|
[
"MIT"
] | 9 |
2018-07-10T13:58:52.000Z
|
2020-04-19T16:37:12.000Z
|
main_precalculate_M_arrayjob.py
|
aalto-ics-kepaco/pairwiseMKL
|
99ebfa880ad57356bf4086f20e38c481672ac115
|
[
"MIT"
] | 1 |
2018-10-16T06:04:20.000Z
|
2018-10-16T06:04:20.000Z
|
main_precalculate_M_arrayjob.py
|
aalto-ics-kepaco/pairwiseMKL
|
99ebfa880ad57356bf4086f20e38c481672ac115
|
[
"MIT"
] | 3 |
2019-12-10T08:27:48.000Z
|
2021-08-09T13:13:00.000Z
|
from sys import argv, exit
import os
import numpy as np
from pairwisemkl.learner.compute_M__arrayjob import *
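# The single command line argument selects which row of M this task computes,
# so the script can be launched once per row as an array job, e.g.
# (hypothetical invocation): python main_precalculate_M_arrayjob.py 7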
try:
id_in = int(argv[1])
except:
exit()
data_path = './drug_response_data'
# Drug kernels
# Read file names of drug kernels
fn_kd = open(data_path + '/Drug_kernels/Drug_kernel_file_names.txt', 'r')
kd_file_names = fn_kd.readlines()
fn_kd.close()
kd_file_names = [x.split('\n')[0] for x in kd_file_names]
# Prepare a list of drug kernels
kd_list = []
for kd in kd_file_names:
f_kd = open(data_path + '/Drug_kernels/' + kd, 'r')
kd_list.append(np.loadtxt(f_kd))
f_kd.close()
# Cell line kernels
# Read file names of cell line kernels
fn_kc = open(data_path + '/Cell_line_kernels/Cell_kernel_file_names.txt', 'r')
kc_file_names = fn_kc.readlines()
fn_kc.close()
kc_file_names = [x.split('\n')[0] for x in kc_file_names]
kc_list = []
# Prepare a list of cell line kernels
for kc in kc_file_names:
f_kc = open(data_path + '/Cell_line_kernels/' + kc, 'r')
kc_list.append(np.loadtxt(f_kc))
f_kc.close()
# Compute a single row of the matrix M (indexed by an integer id_in)
# Matrix M is needed to optimize pairwise kernel weights
m = compute_M_row(kd_list, kc_list, id_in)
new_path = data_path + "/M"
if not os.path.exists(new_path):
os.makedirs(new_path)
np.savetxt(new_path + '/M__row_'+str(id_in)+'.txt', m, delimiter='\t')
print('\nSuccess!')
| 26.092593 | 78 | 0.706175 |
b11db85820b4c96c3c4d507f6227e886445d42d8
| 3,462 |
py
|
Python
|
web_manage/student/migrations/0001_initial.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | 1 |
2019-09-02T02:14:51.000Z
|
2019-09-02T02:14:51.000Z
|
web_manage/student/migrations/0001_initial.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | null | null | null |
web_manage/student/migrations/0001_initial.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | 1 |
2019-09-02T03:33:06.000Z
|
2019-09-02T03:33:06.000Z
|
# Generated by Django 2.2.3 on 2019-08-28 12:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('all', '0001_initial'),
('competition', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='stu_basic_info',
fields=[
('stu_number', models.CharField(default='0', max_length=25, primary_key=True, serialize=False)),
('stu_name', models.CharField(max_length=25)),
('sex', models.CharField(choices=[('男', '男'), ('女', '女')], max_length=10)),
('ID_number', models.CharField(max_length=25)),
('bank_number', models.CharField(blank=True, max_length=25, null=True)),
('phone_number', models.CharField(blank=True, max_length=25, null=True)),
('email', models.EmailField(blank=True, max_length=255, null=True)),
('photo', models.ImageField(blank=True, null=True, upload_to='photo')),
('stu_card_photo', models.ImageField(blank=True, null=True, upload_to='stu_card_photo')),
('department', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='all.depart_info')),
('grade', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='all.grade_info')),
('major', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='all.major_info')),
('stu_class', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='all.class_info')),
],
),
migrations.CreateModel(
name='temp_com_stu_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_leader', models.BooleanField(default=0)),
('stu_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='student.stu_basic_info')),
('temp_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='competition.temp_com_group_basic_info')),
],
),
migrations.CreateModel(
name='stu_fllow_com_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField(default=0)),
('com_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='competition.com_basic_info')),
('stu_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='student.stu_basic_info')),
],
),
migrations.CreateModel(
name='com_stu_info',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_leader', models.BooleanField(default=0)),
('com_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='competition.com_basic_info')),
('group_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='competition.com_group_basic_info')),
('stu_id', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='student.stu_basic_info')),
],
),
]
| 54.09375 | 136 | 0.616696 |
18657b4cbe05493bf0d70963921435cb3deb7e83
| 276 |
py
|
Python
|
cit-api/pipeline/migrations/0125_merge_20210309_2137.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 10 |
2020-11-12T15:13:40.000Z
|
2022-03-05T22:33:08.000Z
|
cit-api/pipeline/migrations/0125_merge_20210309_2137.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 28 |
2020-07-17T16:33:55.000Z
|
2022-03-21T16:24:25.000Z
|
cit-api/pipeline/migrations/0125_merge_20210309_2137.py
|
bcgov/CIT
|
b9db4f169b52e9a6293b3ee1e61935888074215a
|
[
"Apache-2.0"
] | 5 |
2020-11-02T23:39:53.000Z
|
2022-03-01T19:09:45.000Z
|
# Generated by Django 2.2.16 on 2021-03-09 21:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0124_auto_20210309_1650'),
('pipeline', '0123_auto_20210303_2236'),
]
operations = [
]
| 18.4 | 48 | 0.652174 |
30d07d1ac5d0e6b9606dd8567a8eb731dd0a3d52
| 564 |
py
|
Python
|
Python_PI/Clase16.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
Python_PI/Clase16.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
Python_PI/Clase16.py
|
Alex8Navas/PythonPI
|
5f1eff48e8a28f364f5f0dbf25d7a4968a0025bd
|
[
"CC0-1.0"
] | null | null | null |
# Lesson 16. Píldoras Informáticas course.
# Flow Control. Loops 3.
for i in range(5):
    print(f"The value of the variable is {i}")
for i in range(0, 50, 5):  # From 0 to 50 (exclusive) in steps of five
    print(f"The value of the variable is {i}")
print(len("Markus"))
print("\nEmail Verification")
Autoriza = False
email = input("Please enter your email: ")
for i in range(len(email)):
    if email[i] == "@":
        Autoriza = True
if Autoriza == True:
    print("The email is valid, sir.")
else:
    print("The email is invalid.")
| 24.521739 | 67 | 0.648936 |
438971bb4ff3b0e238e246909afed0bed8b7dab8
| 7,859 |
py
|
Python
|
lib/places_classifier.py
|
joelpob/photos-gallery
|
84417a776485cf699d21c67e92b7fbd77c7fb97a
|
[
"MIT"
] | 2 |
2020-08-08T10:30:47.000Z
|
2020-11-28T21:37:41.000Z
|
lib/places_classifier.py
|
joelpob/photos-gallery
|
84417a776485cf699d21c67e92b7fbd77c7fb97a
|
[
"MIT"
] | null | null | null |
lib/places_classifier.py
|
joelpob/photos-gallery
|
84417a776485cf699d21c67e92b7fbd77c7fb97a
|
[
"MIT"
] | null | null | null |
# PlacesCNN to predict the scene category, attribute, and class activation map in a single pass
# by Bolei Zhou, sep 2, 2017
# updated, making it compatible with pytorch 1.x in a hacky way
# updated, cleaned it up, class'ified it, self contained
import torch
from torch.autograd import Variable as V
import torchvision.models as models
from torchvision import transforms as trn
from torch.nn import functional as F
from typing import List
import os
import re
import numpy as np
import cv2
from PIL import Image
class PlacesClassifier():
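    # Minimal usage sketch (paths are illustrative): model and label files are
    # downloaded into models_directory on first use, then a PIL image is tagged:
    #   classifier = PlacesClassifier('./models')
    #   tags = classifier.forward(Image.open('photo.jpg'))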
def __init__(self, models_directory):
self.features_blobs = []
self.classes = None
self.labels_IO = None
self.labels_attribute = None
self.W_attribute = None
self.models_directory = models_directory
self.classes, self.labels_IO, self.labels_attribute, self.W_attribute = self.load_labels()
self.model = self.load_model()
self.tf = self.returnTF()
def recursion_change_bn(self, module):
if isinstance(module, torch.nn.BatchNorm2d):
module.track_running_stats = 1
else:
for i, (name, module1) in enumerate(module._modules.items()):
module1 = self.recursion_change_bn(module1)
return module
def load_labels(self):
# prepare all the labels
# scene category relevant
file_name_category = self.models_directory + '/categories_places365.txt'
if not os.access(file_name_category, os.W_OK):
synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/categories_places365.txt'
os.system('wget -P {} '.format(self.models_directory) + synset_url)
classes = list()
with open(file_name_category) as class_file:
for line in class_file:
classes.append(line.strip().split(' ')[0][3:])
classes = tuple(classes)
# indoor and outdoor relevant
file_name_IO = self.models_directory + '/IO_places365.txt'
if not os.access(file_name_IO, os.W_OK):
synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/IO_places365.txt'
os.system('wget -P {} '.format(self.models_directory) + synset_url)
with open(file_name_IO) as f:
lines = f.readlines()
labels_IO = []
for line in lines:
items = line.rstrip().split()
labels_IO.append(int(items[-1]) - 1) # 0 is indoor, 1 is outdoor
labels_IO = np.array(labels_IO)
# scene attribute relevant
file_name_attribute = self.models_directory + '/labels_sunattribute.txt'
if not os.access(file_name_attribute, os.W_OK):
synset_url = 'https://raw.githubusercontent.com/csailvision/places365/master/labels_sunattribute.txt'
os.system('wget -P {} '.format(self.models_directory) + synset_url)
with open(file_name_attribute) as f:
lines = f.readlines()
labels_attribute = [item.rstrip() for item in lines]
file_name_W = self.models_directory + '/W_sceneattribute_wideresnet18.npy'
if not os.access(file_name_W, os.W_OK):
synset_url = 'http://places2.csail.mit.edu/models_places365/W_sceneattribute_wideresnet18.npy'
os.system('wget -P {} '.format(self.models_directory) + synset_url)
W_attribute = np.load(file_name_W)
return classes, labels_IO, labels_attribute, W_attribute
def hook_feature(self, module, input, output):
self.features_blobs.append(np.squeeze(output.data.cpu().numpy()))
def returnCAM(self, feature_conv, weight_softmax, class_idx):
# generate the class activation maps upsample to 256x256
size_upsample = (256, 256)
nc, h, w = feature_conv.shape
output_cam = []
for idx in class_idx:
            cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h * w)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
output_cam.append(cv2.resize(cam_img, size_upsample))
return output_cam
def returnTF(self):
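        # 224x224 resize plus ImageNet-style mean/std normalisation --
        # presumably matching what the Places365 checkpoint was trained with.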
tf = trn.Compose([
trn.Resize((224, 224)),
trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
return tf
def load_model(self):
if not os.path.isdir(self.models_directory):
os.mkdir(self.models_directory)
model_file = 'wideresnet18_places365.pth.tar'
if not os.access(self.models_directory + '/' + model_file, os.W_OK):
os.system('wget -P {} http://places2.csail.mit.edu/models_places365/'
.format(self.models_directory) + model_file)
os.system('wget -P {} https://raw.githubusercontent.com/csailvision/places365/master/wideresnet.py'
.format(self.models_directory))
import models.wideresnet
model = models.wideresnet.resnet18(num_classes=365)
checkpoint = torch.load(self.models_directory + '/' + model_file,
map_location=lambda storage, loc: storage)
state_dict = {str.replace(k, 'module.', ''): v for k, v in checkpoint['state_dict'].items()}
model.load_state_dict(state_dict)
# hacky way to deal with the upgraded batchnorm2D and avgpool layers...
for i, (name, module) in enumerate(model._modules.items()):
module = self.recursion_change_bn(model)
model.avgpool = torch.nn.AvgPool2d(kernel_size=14, stride=1, padding=0)
        model.eval()
# hook the feature extractor
features_names = ['layer4', 'avgpool'] # this is the last conv layer of the resnet
for name in features_names:
model._modules.get(name).register_forward_hook(self.hook_feature)
return model
def forward(self, img: Image) -> List[str]:
attributes = ['clouds',
'biking',
'swimming',
'driving',
'sunny',
'leaves',
'snow',
'trees',
'climbing',
'hiking',
'rugged',
'ocean',
'scene']
# load the model
tokens = []
# get the softmax weight
params = list(self.model.parameters())
weight_softmax = params[-2].data.numpy()
weight_softmax[weight_softmax < 0] = 0
input_img = V(self.tf(img).unsqueeze(0))
# forward pass
logit = self.model.forward(input_img)
h_x = F.softmax(logit, 1).data.squeeze()
probs, idx = h_x.sort(0, True)
probs = probs.numpy()
idx = idx.numpy()
# output the IO prediction
io_image = np.mean(self.labels_IO[idx[:10]]) # vote for the indoor or outdoor
if io_image < 0.5:
tokens.append('indoor')
else:
tokens.append('outdoor')
# output the prediction of scene category
for i in range(0, 5):
if probs[i] > 0.25:
tokens.append(self.classes[idx[i]])
# output the scene attributes
responses_attribute = self.W_attribute.dot(self.features_blobs[1])
self.features_blobs = []
idx_a = np.argsort(responses_attribute)
for i in range(-1, -10, -1):
t = self.labels_attribute[idx_a[i]]
if t in attributes:
tokens.append(self.labels_attribute[idx_a[i]])
result = []
for token in tokens:
for t in re.split('[, /_-]+', token):
result.append(t)
return list(set(result))
| 40.510309 | 114 | 0.602494 |
30025dfaca3723d9418bc34042fbaaf85ab81137
| 5,929 |
py
|
Python
|
tests/unit/modules/test_win_dns_client.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1 |
2019-09-17T17:48:55.000Z
|
2019-09-17T17:48:55.000Z
|
tests/unit/modules/test_win_dns_client.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
tests/unit/modules/test_win_dns_client.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <[email protected]>
'''
# Import Python Libs
from __future__ import absolute_import, unicode_literals, print_function
import types
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import MagicMock, patch, Mock
# Import Salt Libs
import salt.modules.win_dns_client as win_dns_client
import salt.utils.stringutils
try:
import wmi
HAS_WMI = True
except ImportError:
HAS_WMI = False
class Mockwmi(object):
'''
Mock wmi class
'''
NetConnectionID = 'Local Area Connection'
Index = 0
DNSServerSearchOrder = ['10.1.1.10']
Description = 'Local Area Connection'
DHCPEnabled = True
def __init__(self):
pass
class Mockwinapi(object):
'''
Mock winapi class
'''
def __init__(self):
pass
class winapi(object):
'''
Mock winapi class
'''
def __init__(self):
pass
@staticmethod
def Com():
'''
Mock Com method
'''
return True
@skipIf(not HAS_WMI, 'WMI only available on Windows')
class WinDnsClientTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.win_dns_client
'''
def setup_loader_modules(self):
# wmi and pythoncom modules are platform specific...
mock_pythoncom = types.ModuleType(
salt.utils.stringutils.to_str('pythoncom')
)
sys_modules_patcher = patch.dict('sys.modules',
{'pythoncom': mock_pythoncom})
sys_modules_patcher.start()
self.addCleanup(sys_modules_patcher.stop)
self.WMI = Mock()
self.addCleanup(delattr, self, 'WMI')
return {win_dns_client: {'wmi': wmi}}
# 'get_dns_servers' function tests: 1
def test_get_dns_servers(self):
'''
Test if it return a list of the configured DNS servers
of the specified interface.
'''
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertListEqual(win_dns_client.get_dns_servers
('Local Area Connection'),
['10.1.1.10'])
self.assertFalse(win_dns_client.get_dns_servers('Ethernet'))
# 'rm_dns' function tests: 1
def test_rm_dns(self):
'''
Test if it remove the DNS server from the network interface.
'''
with patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}):
self.assertTrue(win_dns_client.rm_dns('10.1.1.10'))
# 'add_dns' function tests: 1
def test_add_dns(self):
'''
Test if it add the DNS server to the network interface.
'''
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertFalse(win_dns_client.add_dns('10.1.1.10', 'Ethernet'))
self.assertTrue(win_dns_client.add_dns('10.1.1.10', 'Local Area Connection'))
with patch.object(win_dns_client, 'get_dns_servers',
MagicMock(return_value=['10.1.1.10'])), \
patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.add_dns('10.1.1.0', 'Local Area Connection'))
# 'dns_dhcp' function tests: 1
def test_dns_dhcp(self):
'''
Test if it configure the interface to get its
DNS servers from the DHCP server
'''
with patch.dict(win_dns_client.__salt__,
{'cmd.retcode': MagicMock(return_value=0)}):
self.assertTrue(win_dns_client.dns_dhcp())
# 'get_dns_config' function tests: 1
def test_get_dns_config(self):
'''
Test if it get the type of DNS configuration (dhcp / static)
'''
with patch('salt.utils.winapi.Com', MagicMock()), \
patch.object(self.WMI, 'Win32_NetworkAdapter',
return_value=[Mockwmi()]), \
patch.object(self.WMI, 'Win32_NetworkAdapterConfiguration',
return_value=[Mockwmi()]), \
patch.object(wmi, 'WMI', Mock(return_value=self.WMI)):
self.assertTrue(win_dns_client.get_dns_config())
@patch('salt.utils.platform.is_windows')
def test___virtual__non_windows(self, mock):
mock.return_value = False
result = win_dns_client.__virtual__()
expected = (False, 'Module win_dns_client: module only works on '
'Windows systems')
self.assertEqual(result, expected)
@patch.object(win_dns_client, 'HAS_LIBS', False)
def test___virtual__missing_libs(self):
result = win_dns_client.__virtual__()
expected = (False, 'Module win_dns_client: missing required libraries')
self.assertEqual(result, expected)
def test___virtual__(self):
result = win_dns_client.__virtual__()
expected = 'win_dns_client'
self.assertEqual(result, expected)
| 33.88 | 89 | 0.595885 |
c0111ce7dd44450e33d4743ee55a93c0754446bd
| 51,634 |
py
|
Python
|
nova/conf/compute.py
|
adallaway/nova
|
e73000e9e0699f9396bba3c7e87789d85ca59a2d
|
[
"Apache-2.0"
] | 1 |
2021-06-10T17:08:14.000Z
|
2021-06-10T17:08:14.000Z
|
nova/conf/compute.py
|
adallaway/nova
|
e73000e9e0699f9396bba3c7e87789d85ca59a2d
|
[
"Apache-2.0"
] | null | null | null |
nova/conf/compute.py
|
adallaway/nova
|
e73000e9e0699f9396bba3c7e87789d85ca59a2d
|
[
"Apache-2.0"
] | null | null | null |
# needs:check_deprecation_status
# Copyright 2015 Huawei Technology corp.
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo_config import cfg
from oslo_config import types
from nova.conf import paths
compute_group = cfg.OptGroup(
'compute',
title='Compute Manager Options',
help="""
A collection of options specific to the nova-compute service.
""")
compute_opts = [
cfg.StrOpt('compute_driver',
help="""
Defines which driver to use for controlling virtualization.
Possible values:
* ``libvirt.LibvirtDriver``
* ``fake.FakeDriver``
* ``ironic.IronicDriver``
* ``vmwareapi.VMwareVCDriver``
* ``hyperv.HyperVDriver``
* ``powervm.PowerVMDriver``
* ``zvm.ZVMDriver``
"""),
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help="""
Allow destination machine to match source for resize. Useful when
testing in single-host environments. By default it is not allowed
to resize to the same host. Setting this option to true will add
the same host to the destination options. Also set to true
if you allow the ServerGroupAffinityFilter and need to resize.
"""),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova', 'bittorrent'],
help="""
Image properties that should not be inherited from the instance
when taking a snapshot.
This option gives an opportunity to select which image-properties
should not be inherited by newly created snapshots.
.. note::
The following image properties are *never* inherited regardless of
whether they are listed in this configuration option or not:
* cinder_encryption_key_id
* cinder_encryption_key_deletion_policy
* img_signature
* img_signature_hash_method
* img_signature_key_type
* img_signature_certificate_uuid
Possible values:
* A comma-separated list whose item is an image property. Usually only
the image properties that are only needed by base images can be included
here, since the snapshots that are created from the base images don't
need them.
* Default list: cache_in_nova, bittorrent
"""),
cfg.IntOpt('max_local_block_devices',
default=3,
help="""
Maximum number of devices that will result in a local image being
created on the hypervisor node.
A negative number means unlimited. Setting ``max_local_block_devices``
to 0 means that any request that attempts to create a local disk
will fail. This option is meant to limit the number of local disks
(that is, the root local disk that results from ``imageRef`` being used when
creating a server, and any other ephemeral and swap disks). 0 does not
mean that images will be automatically converted to volumes and boot
instances from volumes - it just means that all requests that attempt
to create a local disk will fail.
Possible values:
* 0: Creating a local disk is not allowed.
* Negative number: Allows an unlimited number of local disks.
* Positive number: Allows only this many local disks.
"""),
cfg.ListOpt('compute_monitors',
default=[],
help="""
A comma-separated list of monitors that can be used for getting
compute metrics. You can use the alias/name from the setuptools
entry points for nova.compute.monitors.* namespaces. If no
namespace is supplied, the "cpu." namespace is assumed for
backwards-compatibility.
NOTE: Only one monitor per namespace (For example: cpu) can be loaded at
a time.
Possible values:
* An empty list will disable the feature (Default).
* An example value that would enable the CPU
bandwidth monitor that uses the virt driver variant::
compute_monitors = cpu.virt_driver
"""),
cfg.StrOpt('default_ephemeral_format',
help="""
The default format an ephemeral volume will be formatted with on creation.
Possible values:
* ``ext2``
* ``ext3``
* ``ext4``
* ``xfs``
* ``ntfs`` (only for Windows guests)
"""),
cfg.BoolOpt('vif_plugging_is_fatal',
default=True,
help="""
Determine if instance should boot or fail on VIF plugging timeout.
Nova sends a port update to Neutron after an instance has been scheduled,
providing Neutron with the necessary information to finish setup of the port.
Once completed, Neutron notifies Nova that it has finished setting up the
port, at which point Nova resumes the boot of the instance since network
connectivity is now supposed to be present. A timeout will occur if the reply
is not received after a given interval.
This option determines what Nova does when the VIF plugging timeout event
happens. When enabled, the instance will error out. When disabled, the
instance will continue to boot on the assumption that the port is ready.
Possible values:
* True: Instances should fail after VIF plugging timeout
* False: Instances should continue booting after VIF plugging timeout
"""),
cfg.IntOpt('vif_plugging_timeout',
default=300,
min=0,
help="""
Timeout for Neutron VIF plugging event message arrival.
Number of seconds to wait for Neutron vif plugging events to
arrive before continuing or failing (see 'vif_plugging_is_fatal').
If you are hitting timeout failures at scale, consider running rootwrap
in "daemon mode" in the neutron agent via the ``[agent]/root_helper_daemon``
neutron configuration option.
Related options:
* vif_plugging_is_fatal - If ``vif_plugging_timeout`` is set to zero and
``vif_plugging_is_fatal`` is False, events should not be expected to
arrive at all.
"""),
cfg.IntOpt('arq_binding_timeout',
default=300,
min=1,
help="""
Timeout for Accelerator Request (ARQ) bind event message arrival.
Number of seconds to wait for ARQ bind resolution event to arrive.
The event indicates that every ARQ for an instance has either bound
successfully or failed to bind. If it does not arrive, instance bringup
is aborted with an exception.
"""),
cfg.StrOpt('injected_network_template',
default=paths.basedir_def('nova/virt/interfaces.template'),
help="""Path to '/etc/network/interfaces' template.
The path to a template file for the '/etc/network/interfaces'-style file, which
will be populated by nova and subsequently used by cloudinit. This provides a
method to configure network connectivity in environments without a DHCP server.
The template will be rendered using Jinja2 template engine, and receive a
top-level key called ``interfaces``. This key will contain a list of
dictionaries, one for each interface.
Refer to the cloudinit documentation for more information:
https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
Possible values:
* A path to a Jinja2-formatted template for a Debian '/etc/network/interfaces'
file. This applies even if using a non-Debian-derived guest.
Related options:
* ``flat_inject``: This must be set to ``True`` to ensure nova embeds network
configuration information in the metadata provided through the config drive.
"""),
cfg.StrOpt('preallocate_images',
default='none',
choices=[
('none', 'No storage provisioning is done up front'),
('space', 'Storage is fully allocated at instance start')
],
help="""
The image preallocation mode to use.
Image preallocation allows storage for instance images to be allocated up front
when the instance is initially provisioned. This ensures immediate feedback is
given if enough space isn't available. In addition, it should significantly
improve performance on writes to new blocks and may even improve I/O
performance to prewritten blocks due to reduced fragmentation.
"""),
cfg.BoolOpt('use_cow_images',
default=True,
help="""
Enable use of copy-on-write (cow) images.
QEMU/KVM allow the use of qcow2 as backing files. By disabling this,
backing files will not be used.
"""),
cfg.BoolOpt('force_raw_images',
default=True,
help="""
Force conversion of backing images to raw format.
Possible values:
* True: Backing image files will be converted to raw image format
* False: Backing image files will not be converted
Related options:
* ``compute_driver``: Only the libvirt driver uses this option.
* ``[libvirt]/images_type``: If images_type is rbd, setting this option
to False is not allowed. See the bug
https://bugs.launchpad.net/nova/+bug/1816686 for more details.
"""),
# NOTE(yamahata): ListOpt won't work because the command may include a comma.
# For example:
#
# mkfs.ext4 -O dir_index,extent -E stride=8,stripe-width=16
# --label %(fs_label)s %(target)s
#
# list arguments are comma separated and there is no way to escape such
# commas.
cfg.MultiStrOpt('virt_mkfs',
default=[],
help="""
Name of the mkfs commands for ephemeral device.
The format is <os_type>=<mkfs command>
"""),
cfg.BoolOpt('resize_fs_using_block_device',
default=False,
help="""
Enable resizing of filesystems via a block device.
If enabled, attempt to resize the filesystem by accessing the image over a
block device. This is done by the host and may not be necessary if the image
contains a recent version of cloud-init. Possible mechanisms require the nbd
driver (for qcow and raw), or loop (for raw).
"""),
cfg.IntOpt('timeout_nbd',
default=10,
min=0,
help='Amount of time, in seconds, to wait for NBD device start up.'),
cfg.StrOpt('pointer_model',
default='usbtablet',
choices=[
('ps2mouse', 'Uses relative movement. Mouse connected by PS2'),
('usbtablet', 'Uses absolute movement. Tablet connected by USB'),
(None, 'Uses default behavior provided by drivers (mouse on PS2 '
'for libvirt x86)'),
],
help="""
Generic property to specify the pointer type.
Input devices allow interaction with a graphical framebuffer. For
example to provide a graphic tablet for absolute cursor movement.
If set, the 'hw_pointer_model' image property takes precedence over
this configuration option.
Related options:
* usbtablet must be configured with VNC enabled or SPICE enabled and SPICE
agent disabled. When used with libvirt the instance mode should be
configured as HVM.
"""),
]
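# Illustrative nova.conf excerpt for the [DEFAULT] options above. The values
# are hypothetical examples chosen for this sketch; only compute_driver's
# possible values and the %(fs_label)s/%(target)s placeholders come from the
# help texts above:
#
#   [DEFAULT]
#   compute_driver = libvirt.LibvirtDriver
#   allow_resize_to_same_host = true
#   virt_mkfs = default=mkfs.ext4 --label %(fs_label)s %(target)s
#   pointer_model = usbtablet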
resource_tracker_opts = [
cfg.StrOpt('vcpu_pin_set',
deprecated_for_removal=True,
deprecated_since='20.0.0',
deprecated_reason="""
This option has been superseded by the ``[compute] cpu_dedicated_set`` and
``[compute] cpu_shared_set`` options, which allow things like the co-existence
of pinned and unpinned instances on the same host (for the libvirt driver).
""",
help="""
Mask of host CPUs that can be used for ``VCPU`` resources.
The behavior of this option depends on the definition of the ``[compute]
cpu_dedicated_set`` option and affects the behavior of the ``[compute]
cpu_shared_set`` option.
* If ``[compute] cpu_dedicated_set`` is defined, defining this option will
result in an error.
* If ``[compute] cpu_dedicated_set`` is not defined, this option will be used
to determine inventory for ``VCPU`` resources and to limit the host CPUs
that both pinned and unpinned instances can be scheduled to, overriding the
``[compute] cpu_shared_set`` option.
Possible values:
* A comma-separated list of physical CPU numbers that virtual CPUs can be
allocated from. Each element should be either a single CPU number, a range of
CPU numbers, or a caret followed by a CPU number to be excluded from a
previous range. For example::
vcpu_pin_set = "4-12,^8,15"
Related options:
* ``[compute] cpu_dedicated_set``
* ``[compute] cpu_shared_set``
"""),
cfg.MultiOpt('reserved_huge_pages',
item_type=types.Dict(),
help="""
Number of huge/large memory pages to reserve per NUMA host cell.
Possible values:
* A list of valid key=value which reflect NUMA node ID, page size
(Default unit is KiB) and number of pages to be reserved. For example::
reserved_huge_pages = node:0,size:2048,count:64
reserved_huge_pages = node:1,size:1GB,count:1
In this example we reserve 64 pages of 2MiB on NUMA node 0 and one page
of 1GiB on NUMA node 1.
"""),
cfg.IntOpt('reserved_host_disk_mb',
min=0,
default=0,
help="""
Amount of disk resources in MB to always keep available on the host. The
disk usage gets reported back to the scheduler from nova-compute running
on the compute nodes. To prevent the disk resources from being considered
as available, this option can be used to reserve disk space for that host.
Possible values:
* Any positive integer representing amount of disk in MB to reserve
for the host.
"""),
cfg.IntOpt('reserved_host_memory_mb',
default=512,
min=0,
help="""
Amount of memory in MB to reserve for the host so that it is always available
to host processes. The host resources usage is reported back to the scheduler
continuously from nova-compute running on the compute node. To prevent the host
memory from being considered as available, this option is used to reserve
memory for the host.
Possible values:
* Any positive integer representing amount of memory in MB to reserve
for the host.
"""),
cfg.IntOpt('reserved_host_cpus',
default=0,
min=0,
help="""
Number of host CPUs to reserve for host processes.
The host resources usage is reported back to the scheduler continuously from
nova-compute running on the compute node. This value is used to determine the
``reserved`` value reported to placement.
This option cannot be set if the ``[compute] cpu_shared_set`` or ``[compute]
cpu_dedicated_set`` config options have been defined. When these options are
defined, any host CPUs not included in these values are considered reserved for
the host.
Possible values:
* Any positive integer representing number of physical CPUs to reserve
for the host.
Related options:
* ``[compute] cpu_shared_set``
* ``[compute] cpu_dedicated_set``
"""),
]
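# Worked example for the mask syntax used by vcpu_pin_set above (derived from
# the "4-12,^8,15" sample in the help text): the range 4-12 selects CPUs 4..12,
# the caret removes CPU 8, and 15 is added explicitly, giving the host CPU set
# {4, 5, 6, 7, 9, 10, 11, 12, 15}.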
allocation_ratio_opts = [
cfg.FloatOpt('cpu_allocation_ratio',
default=None,
min=0.0,
help="""
Virtual CPU to physical CPU allocation ratio.
This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``VCPU`` inventory. In addition, the
``AggregateCoreFilter`` (deprecated) will fall back to this configuration value
if no per-aggregate setting is found.
.. note::
This option does not affect ``PCPU`` inventory, which cannot be
overcommitted.
.. note::
If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If
you want to reset back to the initial value, explicitly specify it to the
value of ``initial_cpu_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
Related options:
* ``initial_cpu_allocation_ratio``
"""),
cfg.FloatOpt('ram_allocation_ratio',
default=None,
min=0.0,
help="""
Virtual RAM to physical RAM allocation ratio.
This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``MEMORY_MB`` inventory. In addition, the
``AggregateRamFilter`` (deprecated) will fall back to this configuration value
if no per-aggregate setting is found.
.. note::
If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If
you want to reset back to the initial value, explicitly specify it to the
value of ``initial_ram_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
Related options:
* ``initial_ram_allocation_ratio``
"""),
cfg.FloatOpt('disk_allocation_ratio',
default=None,
min=0.0,
help="""
Virtual disk to physical disk allocation ratio.
This option is used to influence the hosts selected by the Placement API by
configuring the allocation ratio for ``DISK_GB`` inventory. In addition, the
``AggregateDiskFilter`` (deprecated) will fall back to this configuration value
if no per-aggregate setting is found.
When configured, a ratio greater than 1.0 will result in over-subscription of
the available physical disk, which can be useful for more efficiently packing
instances created with images that do not use the entire virtual disk, such as
sparse or compressed images. It can be set to a value between 0.0 and 1.0 in
order to preserve a percentage of the disk for uses other than instances.
.. note::
If the ratio is set to a value ``>1``, we recommend keeping track of the free
disk space, as instances may start to malfunction once the free space
approaches ``0``.
.. note::
If this option is set to something *other than* ``None`` or ``0.0``, the
allocation ratio will be overwritten by the value of this option, otherwise,
the allocation ratio will not change. Once set to a non-default value, it is
not possible to "unset" the config to get back to the default behavior. If
you want to reset back to the initial value, explicitly specify it to the
value of ``initial_disk_allocation_ratio``.
Possible values:
* Any valid positive integer or float value
Related options:
* ``initial_disk_allocation_ratio``
"""),
cfg.FloatOpt('initial_cpu_allocation_ratio',
default=16.0,
min=0.0,
help="""
Initial virtual CPU to physical CPU allocation ratio.
This is only used when initially creating the ``compute_nodes`` table record
for a given nova-compute service.
See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.
Related options:
* ``cpu_allocation_ratio``
"""),
cfg.FloatOpt('initial_ram_allocation_ratio',
default=1.5,
min=0.0,
help="""
Initial virtual RAM to physical RAM allocation ratio.
This is only used when initially creating the ``compute_nodes`` table record
for a given nova-compute service.
See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.
Related options:
* ``ram_allocation_ratio``
"""),
cfg.FloatOpt('initial_disk_allocation_ratio',
default=1.0,
min=0.0,
help="""
Initial virtual disk to physical disk allocation ratio.
This is only used when initially creating the ``compute_nodes`` table record
for a given nova-compute service.
See https://docs.openstack.org/nova/latest/admin/configuration/schedulers.html
for more details and usage scenarios.
Related options:
* ``disk_allocation_ratio``
""")
]
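# Illustrative arithmetic for the allocation ratios above (hypothetical host,
# ignoring the reserved_* options): placement capacity is roughly
# (total - reserved) * allocation_ratio, so a host with 32 physical CPUs and
# the initial_cpu_allocation_ratio of 16.0 is reported as 512 VCPU, and
# 256 GiB of RAM with initial_ram_allocation_ratio of 1.5 is reported as
# roughly 384 GiB of MEMORY_MB.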
compute_manager_opts = [
cfg.StrOpt('console_host',
default=socket.gethostname(),
sample_default="<current_hostname>",
help="""
Console proxy host to be used to connect to instances on this host. It is the
publicly visible name for the console host.
Possible values:
* Current hostname (default) or any string representing hostname.
"""),
cfg.StrOpt('default_access_ip_network_name',
help="""
Name of the network to be used to set access IPs for instances. If there are
multiple IPs to choose from, an arbitrary one will be chosen.
Possible values:
* None (default)
* Any string representing network name.
"""),
cfg.StrOpt('instances_path',
default=paths.state_path_def('instances'),
sample_default="$state_path/instances",
help="""
Specifies where instances are stored on the hypervisor's disk.
It can point to locally attached storage or a directory on NFS.
Possible values:
* $state_path/instances where state_path is a config option that specifies
the top-level directory for maintaining nova's state. (default) or
Any string representing directory path.
Related options:
* ``[workarounds]/ensure_libvirt_rbd_instance_dir_cleanup``
"""),
cfg.BoolOpt('instance_usage_audit',
default=False,
help="""
This option enables periodic compute.instance.exists notifications. Each
compute node must be configured to generate system usage data. These
notifications are consumed by OpenStack Telemetry service.
"""),
cfg.IntOpt('live_migration_retry_count',
default=30,
min=0,
help="""
Maximum number of 1 second retries in live_migration. It specifies the number
of retries against iptables when it complains. This happens when a user
continuously sends live-migration requests to the same host, leading to
concurrent requests to iptables.
Possible values:
* Any positive integer representing retry count.
"""),
cfg.BoolOpt('resume_guests_state_on_host_boot',
default=False,
help="""
This option specifies whether to start guests that were running before the
host rebooted. It ensures that all of the instances on a Nova compute node
resume their state each time the compute node boots or restarts.
"""),
cfg.IntOpt('network_allocate_retries',
default=0,
min=0,
help="""
Number of times to retry network allocation. It is required to attempt network
allocation retries if the virtual interface plug fails.
Possible values:
* Any positive integer representing retry count.
"""),
cfg.IntOpt('max_concurrent_builds',
default=10,
min=0,
help="""
Limits the maximum number of instance builds to run concurrently by
nova-compute. Compute service can attempt to build an infinite number of
instances, if asked to do so. This limit is enforced to avoid building
unlimited instances concurrently on a compute node. This value can be set
per compute node.
Possible Values:
* 0 : treated as unlimited.
* Any positive integer representing maximum concurrent builds.
"""),
cfg.IntOpt('max_concurrent_snapshots',
default=5,
min=0,
help="""
Maximum number of instance snapshot operations to run concurrently.
This limit is enforced to prevent snapshots overwhelming the
host/network/storage and causing failure. This value can be set per
compute node.
Possible Values:
* 0 : treated as unlimited.
* Any positive integer representing maximum concurrent snapshots.
"""),
cfg.IntOpt('max_concurrent_live_migrations',
default=1,
min=0,
help="""
Maximum number of live migrations to run concurrently. This limit is enforced
to avoid outbound live migrations overwhelming the host/network and causing
failures. It is not recommended that you change this unless you are very sure
that doing so is safe and stable in your environment.
Possible values:
* 0 : treated as unlimited.
* Any positive integer representing maximum number of live migrations
to run concurrently.
"""),
cfg.IntOpt('block_device_allocate_retries',
default=60,
min=0,
help="""
The number of times to check for a volume to be "available" before attaching
it during server create.
When creating a server with block device mappings where ``source_type`` is
one of ``blank``, ``image`` or ``snapshot`` and the ``destination_type`` is
``volume``, the ``nova-compute`` service will create a volume and then attach
it to the server. Before the volume can be attached, it must be in status
"available". This option controls how many times to check for the created
volume to be "available" before it is attached.
If the operation times out, the volume will be deleted if the block device
mapping ``delete_on_termination`` value is True.
It is recommended to configure the image cache in the block storage service
to speed up this operation. See
https://docs.openstack.org/cinder/latest/admin/blockstorage-image-volume-cache.html
for details.
Possible values:
* 60 (default)
* If value is 0, then one attempt is made.
* For any value > 0, total attempts are (value + 1)
Related options:
* ``block_device_allocate_retries_interval`` - controls the interval between
checks
"""),
cfg.IntOpt('sync_power_state_pool_size',
default=1000,
help="""
Number of greenthreads available for use to sync power states.
This option can be used to reduce the number of concurrent requests
made to the hypervisor or system with real instance power states
for performance reasons, for example, with Ironic.
Possible values:
* Any positive integer representing greenthreads count.
""")
]
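# Worked example for the volume-attach wait described above (assuming the
# defaults): block_device_allocate_retries=60 together with
# block_device_allocate_retries_interval=3 (defined in interval_opts below)
# gives 60 + 1 = 61 status checks, i.e. roughly three minutes of waiting for
# the volume to become "available" before the attach is given up.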
compute_group_opts = [
cfg.IntOpt('consecutive_build_service_disable_threshold',
default=10,
help="""
Enables reporting of build failures to the scheduler.
Any nonzero value will enable sending build failure statistics to the
scheduler for use by the BuildFailureWeigher.
Possible values:
* Any positive integer enables reporting build failures.
* Zero to disable reporting build failures.
Related options:
* [filter_scheduler]/build_failure_weight_multiplier
"""),
cfg.IntOpt("shutdown_retry_interval",
default=10,
min=1,
help="""
Time to wait in seconds before resending an ACPI shutdown signal to
instances.
The overall time to wait is set by ``shutdown_timeout``.
Possible values:
* Any integer greater than 0 in seconds
Related options:
* ``shutdown_timeout``
"""),
cfg.IntOpt('resource_provider_association_refresh',
default=300,
min=0,
mutable=True,
# TODO(efried): Provide more/better explanation of what this option is
# all about. Reference bug(s). Unless we're just going to remove it.
help="""
Interval for updating nova-compute-side cache of the compute node resource
provider's inventories, aggregates, and traits.
This option specifies the number of seconds between attempts to update a
provider's inventories, aggregates and traits in the local cache of the compute
node.
A value of zero disables cache refresh completely.
The cache can be cleared manually at any time by sending SIGHUP to the compute
process, causing it to be repopulated the next time the data is accessed.
Possible values:
* Any positive integer in seconds, or zero to disable refresh.
"""),
cfg.StrOpt('cpu_shared_set',
help="""
Mask of host CPUs that can be used for ``VCPU`` resources and offloaded
emulator threads.
The behavior of this option depends on the definition of the deprecated
``vcpu_pin_set`` option.
* If ``vcpu_pin_set`` is not defined, ``[compute] cpu_shared_set`` will be
used to provide ``VCPU`` inventory and to determine the host CPUs that
unpinned instances can be scheduled to. It will also be used to determine the
host CPUs that instance emulator threads should be offloaded to for instances
configured with the ``share`` emulator thread policy
(``hw:emulator_threads_policy=share``).
* If ``vcpu_pin_set`` is defined, ``[compute] cpu_shared_set`` will only be
used to determine the host CPUs that instance emulator threads should be
offloaded to for instances configured with the ``share`` emulator thread
policy (``hw:emulator_threads_policy=share``). ``vcpu_pin_set`` will be used
to provide ``VCPU`` inventory and to determine the host CPUs that both pinned
and unpinned instances can be scheduled to.
This behavior will be simplified in a future release when ``vcpu_pin_set`` is
removed.
Possible values:
* A comma-separated list of physical CPU numbers that instance VCPUs can be
allocated from. Each element should be either a single CPU number, a range of
CPU numbers, or a caret followed by a CPU number to be excluded from a
previous range. For example::
cpu_shared_set = "4-12,^8,15"
Related options:
* ``[compute] cpu_dedicated_set``: This is the counterpart option for defining
where ``PCPU`` resources should be allocated from.
* ``vcpu_pin_set``: A legacy option whose definition may change the behavior of
this option.
"""),
cfg.StrOpt('cpu_dedicated_set',
help="""
Mask of host CPUs that can be used for ``PCPU`` resources.
The behavior of this option affects the behavior of the deprecated
``vcpu_pin_set`` option.
* If this option is defined, defining ``vcpu_pin_set`` will result in an error.
* If this option is not defined, ``vcpu_pin_set`` will be used to determine
inventory for ``VCPU`` resources and to limit the host CPUs that both pinned
and unpinned instances can be scheduled to.
This behavior will be simplified in a future release when ``vcpu_pin_set`` is
removed.
Possible values:
* A comma-separated list of physical CPU numbers that instance VCPUs can be
allocated from. Each element should be either a single CPU number, a range of
CPU numbers, or a caret followed by a CPU number to be excluded from a
previous range. For example::
cpu_dedicated_set = "4-12,^8,15"
Related options:
* ``[compute] cpu_shared_set``: This is the counterpart option for defining
where ``VCPU`` resources should be allocated from.
* ``vcpu_pin_set``: A legacy option that this option partially replaces.
"""),
cfg.BoolOpt('live_migration_wait_for_vif_plug',
default=True,
help="""
Determine if the source compute host should wait for a ``network-vif-plugged``
event from the (neutron) networking service before starting the actual transfer
of the guest to the destination compute host.
Note that this option is read on the destination host of a live migration.
If you set this option the same on all of your compute hosts, which you should
do if you use the same networking backend universally, you do not have to
worry about this.
Before starting the transfer of the guest, some setup occurs on the destination
compute host, including plugging virtual interfaces. Depending on the
networking backend **on the destination host**, a ``network-vif-plugged``
event may be triggered and then received on the source compute host and the
source compute can wait for that event to ensure networking is set up on the
destination host before starting the guest transfer in the hypervisor.
.. note::
The compute service cannot reliably determine which types of virtual
interfaces (``port.binding:vif_type``) will send ``network-vif-plugged``
events without an accompanying port ``binding:host_id`` change.
Open vSwitch and linuxbridge should be OK, but OpenDaylight is at least
one known backend that will not currently work in this case, see bug
https://launchpad.net/bugs/1755890 for more details.
Possible values:
* True: wait for ``network-vif-plugged`` events before starting guest transfer
* False: do not wait for ``network-vif-plugged`` events before starting guest
transfer (this is the legacy behavior)
Related options:
* [DEFAULT]/vif_plugging_is_fatal: if ``live_migration_wait_for_vif_plug`` is
True and ``vif_plugging_timeout`` is greater than 0, and a timeout is
reached, the live migration process will fail with an error but the guest
transfer will not have started to the destination host
* [DEFAULT]/vif_plugging_timeout: if ``live_migration_wait_for_vif_plug`` is
True, this controls the amount of time to wait before timing out and either
failing if ``vif_plugging_is_fatal`` is True, or simply continuing with the
live migration
"""),
cfg.IntOpt('max_concurrent_disk_ops',
default=0,
min=0,
help="""
Number of concurrent disk-IO-intensive operations (glance image downloads,
image format conversions, etc.) that we will do in parallel. If this is set
too high then response time suffers.
The default value of 0 means no limit.
"""),
cfg.IntOpt('max_disk_devices_to_attach',
default=-1,
min=-1,
help="""
Maximum number of disk devices allowed to attach to a single server. Note
that the number of disks supported by a server depends on the bus used. For
example, the ``ide`` disk bus is limited to 4 attached devices. The configured
maximum is enforced during server create, rebuild, evacuate, unshelve, live
migrate, and attach volume.
Usually, disk bus is determined automatically from the device type or disk
device, and the virtualization type. However, disk bus
can also be specified via a block device mapping or an image property.
See the ``disk_bus`` field in :doc:`/user/block-device-mapping` for more
information about specifying disk bus in a block device mapping, and
see https://docs.openstack.org/glance/latest/admin/useful-image-properties.html
for more information about the ``hw_disk_bus`` image property.
Operators changing the ``[compute]/max_disk_devices_to_attach`` on a compute
service that is hosting servers should be aware that it could cause rebuilds to
fail if the maximum is decreased below the number of devices already
attached to servers. For example, if server A has 26 devices attached and an
operator changes ``[compute]/max_disk_devices_to_attach`` to 20, a request to
rebuild server A will fail and go into ERROR state because 26 devices are
already attached and exceed the new configured maximum of 20.
Operators setting ``[compute]/max_disk_devices_to_attach`` should also be aware
that during a cold migration, the configured maximum is only enforced in-place
and the destination is not checked before the move. This means if an operator
has set a maximum of 26 on compute host A and a maximum of 20 on compute host
B, a cold migration of a server with 26 attached devices from compute host A to
compute host B will succeed. Then, once the server is on compute host B, a
subsequent request to rebuild the server will fail and go into ERROR state
because 26 devices are already attached and exceed the configured maximum of 20
on compute host B.
The configured maximum is not enforced on shelved offloaded servers, as they
have no compute host.
Possible values:
* -1 means unlimited
* Any integer >= 0 represents the maximum allowed
"""),
cfg.StrOpt('provider_config_location',
default='/etc/nova/provider_config/',
help="""
Location of YAML files containing resource provider configuration data.
These files allow the operator to specify additional custom inventory and
traits to assign to one or more resource providers.
Additional documentation is available here:
https://docs.openstack.org/nova/latest/admin/managing-resource-providers.html
"""),
cfg.ListOpt('image_type_exclude_list',
default=[],
help="""
A list of image formats that should not be advertised as supported by this
compute node.
In some situations, it may be desirable to have a compute node
refuse to support an expensive or complex image format. This factors into
the decisions made by the scheduler about which compute node to select when
booted with a given image.
Possible values:
* Any glance image ``disk_format`` name (e.g. ``raw``, ``qcow2``, etc.)
Related options:
* ``[scheduler]query_placement_for_image_type_support`` - enables
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
]
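# Illustrative nova.conf excerpt for the [compute] options above. CPU numbers
# and limits are hypothetical, shown only to make the option grouping concrete:
#
#   [compute]
#   cpu_shared_set = 0-3
#   cpu_dedicated_set = 4-15
#   max_disk_devices_to_attach = 20
#   live_migration_wait_for_vif_plug = true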
interval_opts = [
cfg.IntOpt('sync_power_state_interval',
default=600,
help="""
Interval to sync power states between the database and the hypervisor.
This is the interval at which Nova compares the actual virtual machine power
state against the power state recorded in its database. If a user powers
down their VM, Nova updates the API to report the VM has been
powered down. Should something turn on the VM unexpectedly,
Nova will turn the VM back off to keep the system in the expected
state.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* If ``handle_virt_lifecycle_events`` in the ``workarounds`` group is
false and this option is negative, then instances that get out
of sync between the hypervisor and the Nova database will have
to be synchronized manually.
"""),
cfg.IntOpt('heal_instance_info_cache_interval',
default=60,
help="""
Interval between instance network information cache updates.
Number of seconds after which each compute node runs the task of
querying Neutron for all of its instances' networking information,
then updates the Nova db with that information. Nova will never
update its cache if this option is set to 0. If we don't update the
cache, the metadata service and nova-api endpoints will be proxying
incorrect network data about the instance. So, it is not recommended
to set this option to 0.
Possible values:
* Any positive integer in seconds.
* Any value <=0 will disable the sync. This is not recommended.
"""),
cfg.IntOpt('reclaim_instance_interval',
default=0,
help="""
Interval for reclaiming deleted instances.
A value greater than 0 will enable SOFT_DELETE of instances.
This option decides whether the server to be deleted will be put into
the SOFT_DELETED state. If this value is greater than 0, the deleted
server will not be deleted immediately, instead it will be put into
a queue until it's too old (deleted time greater than the value of
reclaim_instance_interval). The server can be recovered from the
delete queue by using the restore action. If the deleted server remains
longer than the value of reclaim_instance_interval, it will be
deleted by a periodic task in the compute service automatically.
Note that this option is read from both the API and compute nodes, and
must be set globally otherwise servers could be put into a soft deleted
state in the API and never actually reclaimed (deleted) on the compute
node.
.. note:: When using this option, you should also configure the ``[cinder]``
auth options, e.g. ``auth_type``, ``auth_url``, ``username``, etc.
Since the reclaim happens in a periodic task, there is no user token
to cleanup volumes attached to any SOFT_DELETED servers so nova must
be configured with administrator role access to cleanup those
resources in cinder.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
this option.
* Any value <=0 will disable the option.
Related options:
* [cinder] auth options for cleaning up volumes attached to servers during
the reclaim process
"""),
cfg.IntOpt('volume_usage_poll_interval',
default=0,
help="""
Interval for gathering volume usages.
This option updates the volume usage cache for every
volume_usage_poll_interval number of seconds.
Possible values:
* Any positive integer(in seconds) greater than 0 will enable
this option.
* Any value <=0 will disable the option.
"""),
cfg.IntOpt('shelved_poll_interval',
default=3600,
help="""
Interval for polling shelved instances to offload.
The periodic task runs every shelved_poll_interval seconds and
checks if there are any shelved instances. If it
finds a shelved instance, based on the 'shelved_offload_time' config
value it offloads the shelved instances. Check 'shelved_offload_time'
config option description for details.
Possible values:
* Any value <= 0: Disables the option.
* Any positive integer in seconds.
Related options:
* ``shelved_offload_time``
"""),
cfg.IntOpt('shelved_offload_time',
default=0,
help="""
Time before a shelved instance is eligible for removal from a host.
By default this option is set to 0 and the shelved instance will be
removed from the hypervisor immediately after shelve operation.
Otherwise, the instance will be kept for the value of
shelved_offload_time (in seconds), so that the unshelve action is faster
during that period; the periodic task then removes the instance from the
hypervisor once shelved_offload_time has passed.
Possible values:
* 0: Instance will be immediately offloaded after being
shelved.
* Any value < 0: An instance will never offload.
* Any positive integer in seconds: The instance will exist for
the specified number of seconds before being offloaded.
"""),
# NOTE(melwitt): We're also using this option as the interval for cleaning
# up expired console authorizations from the database. It's related to the
# instance_delete_interval in that it's another task for cleaning up
# resources related to an instance.
cfg.IntOpt('instance_delete_interval',
default=300,
help="""
Interval for retrying failed instance file deletes.
This option depends on 'maximum_instance_delete_attempts'.
This option specifies how often to retry deletes whereas
'maximum_instance_delete_attempts' specifies the maximum number
of retry attempts that can be made.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* ``maximum_instance_delete_attempts`` from instance_cleaning_opts
group.
"""),
cfg.IntOpt('block_device_allocate_retries_interval',
default=3,
min=0,
help="""
Interval (in seconds) between block device allocation retries on failures.
This option allows the user to specify the time interval between
consecutive retries. The ``block_device_allocate_retries`` option specifies
the maximum number of retries.
Possible values:
* 0: Disables the option.
* Any positive integer in seconds enables the option.
Related options:
* ``block_device_allocate_retries`` - controls the number of retries
"""),
cfg.IntOpt('scheduler_instance_sync_interval',
default=120,
help="""
Interval between sending the scheduler a list of current instance UUIDs to
verify that its view of instances is in sync with nova.
If the CONF option 'scheduler_tracks_instance_changes' is
False, the sync calls will not be made. So, changing this option will
have no effect.
If the out of sync situations are not very common, this interval
can be increased to lower the number of RPC messages being sent.
Likewise, if sync issues turn out to be a problem, the interval
can be lowered to check more frequently.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
Related options:
* This option has no impact if ``scheduler_tracks_instance_changes``
is set to False.
"""),
cfg.IntOpt('update_resources_interval',
default=0,
help="""
Interval for updating compute resources.
This option specifies how often the update_available_resource
periodic task should run. A number less than 0 means to disable the
task completely. Leaving this at the default of 0 will cause this to
run at the default periodic interval. Setting it to any positive
value will cause it to run at approximately that number of seconds.
Possible values:
* 0: Will run at the default periodic interval.
* Any value < 0: Disables the option.
* Any positive integer in seconds.
""")
]
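# Illustration for reclaim_instance_interval above (hypothetical value): with
# reclaim_instance_interval = 3600, a deleted server stays in SOFT_DELETED
# state - and can still be restored - for up to an hour before the periodic
# task reclaims it for good.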
timeout_opts = [
cfg.IntOpt("reboot_timeout",
default=0,
min=0,
help="""
Time interval after which an instance is hard rebooted automatically.
When doing a soft reboot, it is possible that a guest kernel is
completely hung in a way that causes the soft reboot task
to not ever finish. Setting this option to a time period in seconds
will automatically hard reboot an instance if it has been stuck
in a rebooting state longer than N seconds.
Possible values:
* 0: Disables the option (default).
* Any positive integer in seconds: Enables the option.
"""),
cfg.IntOpt("instance_build_timeout",
default=0,
min=0,
help="""
Maximum time in seconds that an instance can take to build.
If this timer expires, instance status will be changed to ERROR.
Enabling this option will make sure an instance will not be stuck
in BUILD state for a longer period.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
cfg.IntOpt("rescue_timeout",
default=0,
min=0,
help="""
Interval to wait before un-rescuing an instance stuck in RESCUE.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
cfg.IntOpt("resize_confirm_window",
default=0,
min=0,
help="""
Automatically confirm resizes after N seconds.
Resize functionality will save the existing server before resizing.
After the resize completes, the user is asked to confirm the resize.
The user has the opportunity to either confirm or revert all
changes. Confirm resize removes the original server and changes
server status from resized to active. Setting this option to a time
period (in seconds) will automatically confirm the resize if the
server is in resized state longer than that time.
Possible values:
* 0: Disables the option (default)
* Any positive integer in seconds: Enables the option.
"""),
cfg.IntOpt("shutdown_timeout",
default=60,
min=0,
help="""
Total time to wait in seconds for an instance to perform a clean
shutdown.
It determines the overall period (in seconds) a VM is allowed to
perform a clean shutdown. During stop, rescue, shelve and rebuild
operations, configuring this option gives the VM a chance
to perform a controlled shutdown before the instance is powered off.
The default timeout is 60 seconds. A value of 0 (zero) means the guest
will be powered off immediately with no opportunity for guest OS clean-up.
The timeout value can be overridden on a per-image basis by means of the
os_shutdown_timeout image metadata setting, which allows
different types of operating systems to specify how much time they
need to shut down cleanly.
Possible values:
* A positive integer or 0 (default value is 60).
""")
]
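# Hypothetical CLI example for the per-image override mentioned under
# shutdown_timeout above (command shown for illustration only):
#
#   openstack image set --property os_shutdown_timeout=120 <image>
#
# Guests booted from that image then get 120 seconds for a clean shutdown
# instead of the configured shutdown_timeout.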
running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
choices=[
('reap', 'Powers down the instances and deletes them'),
('log', 'Logs warning message about deletion of the resource'),
('shutdown', 'Powers down instances and marks them as '
'non-bootable which can be later used for debugging/analysis'),
('noop', 'Takes no action'),
],
help="""
The compute service periodically checks for instances that have been
deleted in the database but remain running on the compute node. The
above option enables action to be taken when such instances are
identified.
Related options:
* ``running_deleted_instance_poll_interval``
* ``running_deleted_instance_timeout``
"""),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
help="""
Time interval in seconds to wait between runs of the cleanup action.
If set to 0, the above check will be disabled. If ``running_deleted_instance_action``
is set to "log" or "reap", a value greater than 0 must be set.
Possible values:
* Any positive integer in seconds enables the option.
* 0: Disables the option.
* 1800: Default value.
Related options:
* running_deleted_instance_action
"""),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
help="""
Time interval in seconds to wait for instances that have been marked as
deleted in the database to become eligible for cleanup.
Possible values:
* Any positive integer in seconds (default is 0).
Related options:
* "running_deleted_instance_action"
"""),
]
instance_cleaning_opts = [
cfg.IntOpt('maximum_instance_delete_attempts',
default=5,
min=1,
help="""
The number of times to attempt to reap an instance's files.
This option specifies the maximum number of retry attempts
that can be made.
Possible values:
* Any positive integer defines how many attempts are made.
Related options:
* ``[DEFAULT] instance_delete_interval`` can be used to disable this option.
""")
]
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
choices=[
('', 'An empty value means that no uniqueness check is done and '
'duplicate names are possible'),
('project', 'The instance name check is done only for instances '
'within the same project'),
('global', 'The instance name check is done for all instances '
'regardless of the project'),
],
help="""
Sets the scope of the check for unique instance names.
The default doesn't check for unique names. If a scope for the name check is
set, a launch of a new instance or an update of an existing instance with a
duplicate name will result in an ``InstanceExists`` error. The uniqueness is
case-insensitive. Setting this option can increase the usability for end
users as they don't have to distinguish among instances with the same name
by their IDs.
"""),
cfg.BoolOpt('enable_new_services',
default=True,
help="""
Enable new nova-compute services on this host automatically.
When a new nova-compute service starts up, it gets
registered in the database as an enabled service. Sometimes it can be useful
to register new compute services in a disabled state and then enable them at a
later point in time. This option only sets this behavior for nova-compute
services; it does not auto-disable other services like nova-conductor,
nova-scheduler, or nova-osapi_compute.
Possible values:
* ``True``: Each new compute service is enabled as soon as it registers itself.
* ``False``: Compute services must be enabled via an os-services REST API call
or with the CLI with ``nova service-enable <hostname> <binary>``, otherwise
they are not ready to use.
"""),
cfg.StrOpt('instance_name_template',
default='instance-%08x',
help="""
Template string to be used to generate instance names.
This template controls the creation of the database name of an instance. This
is *not* the display name you enter when creating an instance (via Horizon
or CLI). For a new deployment it is advisable to change the default value
(which uses the database autoincrement) to another value which makes use
of the attributes of an instance, like ``instance-%(uuid)s``. If you
already have instances in your deployment when you change this, your
deployment will break.
Possible values:
* A string which either uses the instance database ID (like the
default)
* A string with a list of named database columns, for example ``%(id)d``
or ``%(uuid)s`` or ``%(hostname)s``.
"""),
]
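# Worked example for instance_name_template above: the default template
# renders the database ID as zero-padded hex, e.g. 'instance-%08x' % 123
# evaluates to 'instance-0000007b', while a template such as
# 'instance-%(uuid)s' interpolates named database columns instead.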
ALL_OPTS = (compute_opts +
resource_tracker_opts +
allocation_ratio_opts +
compute_manager_opts +
interval_opts +
timeout_opts +
running_deleted_opts +
instance_cleaning_opts +
db_opts)
def register_opts(conf):
conf.register_opts(ALL_OPTS)
conf.register_group(compute_group)
conf.register_opts(compute_group_opts, group=compute_group)
def list_opts():
return {'DEFAULT': ALL_OPTS,
'compute': compute_group_opts}
| 34.982385 | 83 | 0.740927 |
c3bf76bc115781e2d1b3d853a430f799457767f1 | 926 | py | Python | var/spack/repos/builtin/packages/r-later/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-later/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-later/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLater(RPackage):
"""Utilities for Scheduling Functions to Execute Later with Event Loops.
Executes arbitrary R or C functions some time after the current time, after
the R execution stack has emptied."""
cran = "later"
version('1.3.0', sha256='08f50882ca3cfd2bb68c83f1fcfbc8f696f5cfb5a42c1448c051540693789829')
version('1.1.0.1', sha256='71baa7beae774a35a117e01d7b95698511c3cdc5eea36e29732ff1fe8f1436cd')
version('0.8.0', sha256='6b2a28b43c619b2c7890840c62145cd3a34a7ed65b31207fdedde52efb00e521')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('r-rlang', type=('build', 'run'))
depends_on('r-bh', type=('build', 'run'), when='@:1.1.0.1')
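# Hypothetical usage of this recipe with a working Spack installation (not
# part of the original package file):
#   spack install [email protected]
#   spack load r-later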
| 37.04 | 97 | 0.733261 |
4c076308e6cdc168e614c9eb1a37c3c0f96078e4 | 1,664 | py | Python | api/main.py | folksilva/hawknest | bd074da8cdac1020009c8b83ee648c8b253e66f0 | ["Apache-2.0"] | null | null | null | api/main.py | folksilva/hawknest | bd074da8cdac1020009c8b83ee648c8b253e66f0 | ["Apache-2.0"] | null | null | null | api/main.py | folksilva/hawknest | bd074da8cdac1020009c8b83ee648c8b253e66f0 | ["Apache-2.0"] | 1 | 2019-05-07T13:26:51.000Z | 2019-05-07T13:26:51.000Z |
"""
Data models:
TYPE:
- _id = ObjectId of the type
- name = Name of the type
- level = Access level of the type
- groups = Groups with access to the type
GROUP
- _id = ObjectId of the group
- name = Name of the group
- users = Users in the group
PERMISSIONS
- _id = ObjectId of the permission (fixed as 'main')
- employees = Users with employee level (2)
- managers = Users with manager level (3)
"""
import os
import secrets
from flask import Flask
from flask_simpleldap import LDAP
from flask_pymongo import PyMongo
from flask_cors import CORS
from session import MongoSessionInterface
from elasticsearch import Elasticsearch
app = Flask(__name__)
# Configure App
app.config['MONGO_URI'] = 'mongodb://%s/%s' % (
os.getenv('MONGO_HOST'),
os.getenv('MONGO_DBNAME')
)
app.config['LDAP_HOST'] = os.getenv('LDAP_HOST')
app.config['LDAP_USERNAME'] = os.getenv('LDAP_USERNAME')
app.config['LDAP_PASSWORD'] = os.getenv('LDAP_PASSWORD')
app.config['LDAP_BASE_DN'] = os.getenv('LDAP_BASE_DN')
app.config['LDAP_DOMAIN'] = os.getenv('LDAP_DOMAIN')
app.config['APP_NAME'] = os.getenv('APP_NAME', 'Hawknest')
app.config['ADMIN_USERS'] = os.getenv('ADMIN_USERS', '')
app.config['ELASTICSEARCH_URI'] = os.getenv('ELASTICSEARCH_URI')
app.config['ELASTICSEARCH_INDEX'] = os.getenv('ELASTICSEARCH_INDEX')
app.secret_key = secrets.token_urlsafe(128)
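# Hypothetical environment for local development (values are examples only;
# the variable names are the ones read above):
#   MONGO_HOST=mongo:27017          MONGO_DBNAME=hawknest
#   LDAP_HOST=ldap.example.com      LDAP_BASE_DN="dc=example,dc=com"
#   ELASTICSEARCH_URI=http://elasticsearch:9200
#   ELASTICSEARCH_INDEX=hawknest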
# Initialize extensions
mongo = PyMongo(app)
ldap = LDAP(app)
cors = CORS(app)
app.session_interface = MongoSessionInterface(db=mongo.db)
es = Elasticsearch([app.config['ELASTICSEARCH_URI']])
import util
import errors
# Configure routes
import document_views
import type_views
import group_views
import auth_views
import search_views
| 26 | 68 | 0.754207 |
63e4f20a2d2e22691cc77b65e7fe762d410b11ed
| 652 |
py
|
Python
|
deltalanguage/test/test_wiring.py
|
riverlane/deltalanguage
|
41c3cfa88ed3f17956645c18566c2147a4bdd74c
|
[
"MIT"
] | 16 |
2021-01-06T17:44:51.000Z
|
2022-01-06T12:07:07.000Z
|
deltalanguage/test/test_wiring.py
|
riverlane/deltalanguage
|
41c3cfa88ed3f17956645c18566c2147a4bdd74c
|
[
"MIT"
] | null | null | null |
deltalanguage/test/test_wiring.py
|
riverlane/deltalanguage
|
41c3cfa88ed3f17956645c18566c2147a4bdd74c
|
[
"MIT"
] | 4 |
2021-03-25T20:35:08.000Z
|
2021-09-06T13:10:58.000Z
|
"""Testing wiring of the graph."""
import unittest
import deltalanguage as dl
from deltalanguage.test._node_lib import terminate_non_const
class NoOutputTest(unittest.TestCase):
def test_error_thrown(self):
"""Nodes without output ports cannot produce output.
.. note::
This is very different from Python functions, which return None
by default.
"""
with self.assertRaises(dl.data_types.DeltaIOError):
with dl.DeltaGraph():
no_message = terminate_non_const(5)
terminate_non_const(no_message)
if __name__ == "__main__":
unittest.main()
| 24.148148 | 75 | 0.657975 |
f63136f539ff5c08f745d9a0bb716291fe249ad2
| 13,084 |
py
|
Python
|
arcanelab/ouroboros/migrations/0001_initial.py
|
luismasuelli/arcanelab-ouroboros
|
8a7b6003234a244cd4198c842ba69fdd76e2255c
|
[
"MIT"
] | null | null | null |
arcanelab/ouroboros/migrations/0001_initial.py
|
luismasuelli/arcanelab-ouroboros
|
8a7b6003234a244cd4198c842ba69fdd76e2255c
|
[
"MIT"
] | null | null | null |
arcanelab/ouroboros/migrations/0001_initial.py
|
luismasuelli/arcanelab-ouroboros
|
8a7b6003234a244cd4198c842ba69fdd76e2255c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-19 00:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import arcanelab.ouroboros.fields
import arcanelab.ouroboros.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='CourseInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of record creation', verbose_name='Creation Date')),
('updated_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of last record update', verbose_name='Update Date')),
('term_level', models.PositiveIntegerField(blank=True, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CourseSpec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.SlugField(blank=True, help_text='Internal (unique) code', max_length=20, verbose_name='Code')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('description', models.TextField(max_length=1023, verbose_name='Description')),
('cancel_permission', models.CharField(blank=True, help_text='Permission code (as <application>.<permission>) to test against when this course instance is cancelled. The user who intends to cancel this course instance must satisfy this permission against the associated document.', max_length=201, null=True, verbose_name='Cancel Permission')),
],
options={
'verbose_name': 'Course Spec',
'verbose_name_plural': 'Course Specs',
},
),
migrations.CreateModel(
name='NodeInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of record creation', verbose_name='Creation Date')),
('updated_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of last record update', verbose_name='Update Date')),
('course_instance', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='node_instance', to='ouroboros.CourseInstance')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='NodeSpec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('enter', 'Enter'), ('exit', 'Exit'), ('cancel', 'Cancel'),
('joined', 'Joined'), ('input', 'Input'), ('step', 'Step'),
('multiplexer', 'Multiplexer'), ('split', 'Split')],
help_text='Node type', max_length=15, verbose_name='Type')),
('code', models.SlugField(help_text='Internal (unique) code', max_length=20, verbose_name='Code')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('description', models.TextField(max_length=1023, verbose_name='Description')),
('landing_handler', arcanelab.ouroboros.fields.CallableReferenceField(blank=True, help_text='A callable that will triggered when this node is reached. The expected signature is (document, user) since no interaction is expected to exist with the workflow instance, but the handlers should perform actions in the document', max_length=255, null=True, verbose_name='Landing Handler')),
('exit_value', models.PositiveSmallIntegerField(blank=True, help_text='Exit value. Expected only for exit nodes', null=True, verbose_name='Exit Value')),
('joiner', arcanelab.ouroboros.fields.CallableReferenceField(blank=True, help_text="A callable that will be triggered every time a split's branch reaches an end. The split's branch will trigger this callable which must return a valid transition name (existing action as outbound in this node) to leave the split and take an action, or None to remain in the split and wait for other branches (an exception will be raised if None is returned but no branch is still to finish). Its contract is (document, statuses, last) being the associated document, a dictionary of branch codes and their exit values (None: running; -1: cancelled or joined,>= 0: terminated by exit node), and the code of the branch being joined (such code will be present in the dictionary)", max_length=255, null=True, verbose_name='Joiner')),
('execute_permission', models.CharField(blank=True, help_text='Permission code (as <application>.<permission>) to test against when an action on this node is executed. The user who intends to execute the action in this node must satisfy this permission against the associated document', max_length=201, null=True, verbose_name='Cancel Permission')),
('branches', models.ManyToManyField(blank=True, help_text='Courses this node branches to. Expected only for split nodes', related_name='callers', to='ouroboros.CourseSpec', verbose_name='Branches')),
('course_spec', models.ForeignKey(help_text='Course spec this node spec belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='node_specs', to='ouroboros.CourseSpec', verbose_name='Course Spec')),
],
options={
'verbose_name': 'Node',
'verbose_name_plural': 'Nodes',
},
),
migrations.CreateModel(
name='TransitionSpec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_name', models.SlugField(blank=True, help_text='Action name for this transition. Unique with respect to the origin node. Expected only for split and input nodes', max_length=20, null=True, verbose_name='Action Name')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('description', models.TextField(max_length=1023, verbose_name='Description')),
('permission', models.CharField(blank=True, help_text='Permission code (as <application>.<permission>) to test against. It is not required, but only allowed if coming from an input node', max_length=201, null=True, verbose_name='Permission')),
('condition', arcanelab.ouroboros.fields.CallableReferenceField(blank=True, help_text='A callable evaluating the condition. Expected only for multiplexer nodes. The condition will evaluate with signature (document, user) and will return a value that will be treated as boolean.', max_length=255, null=True, verbose_name='Condition')),
('priority', models.PositiveSmallIntegerField(blank=True, help_text='A priority value used to order evaluation of condition. Expected only for multiplexer nodes', null=True, verbose_name='Priority')),
('destination', models.ForeignKey(help_text='Destination node', on_delete=django.db.models.deletion.CASCADE, related_name='inbounds', to='ouroboros.NodeSpec', validators=[arcanelab.ouroboros.models.valid_destination_types], verbose_name='Destination')),
('origin', models.ForeignKey(help_text='Origin node', on_delete=django.db.models.deletion.CASCADE, related_name='outbounds', to='ouroboros.NodeSpec', validators=[arcanelab.ouroboros.models.valid_origin_types], verbose_name='Origin')),
],
options={
'verbose_name': 'Transition Spec',
'verbose_name_plural': 'Transition Specs',
},
),
migrations.CreateModel(
name='WorkflowInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of record creation', verbose_name='Creation Date')),
('updated_on', models.DateTimeField(default=django.utils.timezone.now, editable=False, help_text='Date and time of last record update', verbose_name='Update Date')),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'verbose_name': 'Workflow Instance',
'verbose_name_plural': 'Workflow Instances',
},
),
migrations.CreateModel(
name='WorkflowSpec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.SlugField(help_text='Internal (unique) code', max_length=20, unique=True, verbose_name='Code')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('description', models.TextField(max_length=1023, verbose_name='Description')),
('create_permission', models.CharField(blank=True, help_text='Permission code (as <application>.<permission>) to test against when a workflow instance is created. The user who intends to create a workflow instance must satisfy this permission against the associated document.', max_length=201, null=True, verbose_name='Create Permission')),
('cancel_permission', models.CharField(blank=True, help_text='Permission code (as <application>.<permission>) to test against when a course instance is cancelled. The user who intends to cancel a course instance in this workflow must satisfy this permission against the associated document.', max_length=201, null=True, verbose_name='Cancel Permission')),
('document_type', models.ForeignKey(help_text='Accepted related document class', on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', validators=[arcanelab.ouroboros.models.valid_document_type], verbose_name='Document Type')),
],
options={
'verbose_name': 'Workflow Spec',
'verbose_name_plural': 'Workflow Specs',
},
),
migrations.AddField(
model_name='workflowinstance',
name='workflow_spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='instances', to='ouroboros.WorkflowSpec'),
),
migrations.AddField(
model_name='nodeinstance',
name='node_spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='ouroboros.NodeSpec'),
),
migrations.AddField(
model_name='coursespec',
name='workflow_spec',
field=models.ForeignKey(help_text='Workflow spec this course spec belongs to', on_delete=django.db.models.deletion.CASCADE, related_name='course_specs', to='ouroboros.WorkflowSpec', verbose_name='Workflow Spec'),
),
migrations.AddField(
model_name='courseinstance',
name='course_spec',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ouroboros.CourseSpec'),
),
migrations.AddField(
model_name='courseinstance',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='branches', to='ouroboros.NodeInstance'),
),
migrations.AddField(
model_name='courseinstance',
name='workflow_instance',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='courses', to='ouroboros.WorkflowInstance'),
),
migrations.AlterUniqueTogether(
name='workflowinstance',
unique_together={('content_type', 'object_id')},
),
migrations.AlterUniqueTogether(
name='nodespec',
unique_together={('course_spec', 'code')},
),
migrations.AlterUniqueTogether(
name='coursespec',
unique_together={('workflow_spec', 'code')},
),
]
| 75.630058 | 827 | 0.657979 |
c9a7275608d601c1bd4b661e6154f41b450a9d7e
| 1,758 |
py
|
Python
|
app/user/serializers.py
|
PhusionDev/recipe-app-api
|
8a2b0d7340cb2b214aaee979a8d22e59ecd17dea
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
PhusionDev/recipe-app-api
|
8a2b0d7340cb2b214aaee979a8d22e59ecd17dea
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
PhusionDev/recipe-app-api
|
8a2b0d7340cb2b214aaee979a8d22e59ecd17dea
|
[
"MIT"
] | null | null | null |
"""User Serializer Module"""
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
| 30.842105 | 74 | 0.646758 |
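The serializers above are usually consumed by DRF views. A minimal companion sketch, assuming the module is importable as user.serializers; the view classes and wiring here are illustrative and not part of the original repository record:

# Hypothetical app/user/views.py pairing for the serializers shown above.
from rest_framework import generics
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings

from user.serializers import UserSerializer, AuthTokenSerializer


class CreateUserView(generics.CreateAPIView):
    """Create a new user; UserSerializer.create() stores the hashed password."""
    serializer_class = UserSerializer


class CreateTokenView(ObtainAuthToken):
    """Issue an auth token; AuthTokenSerializer.validate() performs authentication."""
    serializer_class = AuthTokenSerializer
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES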
fa354fea724873e17b099aa1dcf236f31379f647
| 1,522 |
py
|
Python
|
oracle/arxiv/create_arxiv_jsonl.py
|
Yale-LILY/DYLE
|
0955035f244948b4b3a0dd49ff495ce3a99fd169
|
[
"MIT"
] | 2 |
2021-12-31T08:29:51.000Z
|
2022-01-24T13:13:24.000Z
|
oracle/arxiv/create_arxiv_jsonl.py
|
Yale-LILY/DYLE
|
0955035f244948b4b3a0dd49ff495ce3a99fd169
|
[
"MIT"
] | null | null | null |
oracle/arxiv/create_arxiv_jsonl.py
|
Yale-LILY/DYLE
|
0955035f244948b4b3a0dd49ff495ce3a99fd169
|
[
"MIT"
] | 2 |
2021-10-31T02:51:36.000Z
|
2021-12-25T00:32:49.000Z
|
import json
from tqdm import tqdm
from nltk.tokenize import sent_tokenize, word_tokenize
import os.path
from datasets import load_dataset
mode = "test"
dataset = load_dataset("scientific_papers", "arxiv", split=mode)
source_content = [data_entity["article"] for data_entity in dataset]
target_content = [data_entity["abstract"] for data_entity in dataset]
data = []
idx = 0
token_limit = 64
def insert_new(article_list, sent):
token_list = word_tokenize(sent)
while len(token_list) > token_limit:
article_list.append(" ".join(token_list[:token_limit]))
token_list = token_list[token_limit:]
article_list.append(" ".join(token_list))
def process_article_sent_tokenize(article):
article = " ".join(word_tokenize(article.lower()))
article = sent_tokenize(article)
return article
def process_article(article):
article = process_article_sent_tokenize(article)
new_article = []
for sent in article:
insert_new(new_article, sent)
return new_article
for article, summary in tqdm(zip(source_content, target_content), total = len(source_content)):
json_entry = {}
json_entry["article"] = article
json_entry["summary"] = summary
fname = "./index/{}.dec".format(mode, idx)
with open(fname) as f:
idx += 1
oracle = f.read().strip()
oracle = json.loads(oracle)
if len(oracle) > 100:
article = process_article(article)
| 26.241379 | 96 | 0.664258 |
fc50f3cbeb8ec2e2d21782c552c5e8feb02d1f56
| 586 |
py
|
Python
|
StatTables/Python Data Generation/Chi-Squared.py
|
LukeBaal/PublicProjects
|
e9aae06b95189b69d642a6eaae53e3d970452e66
|
[
"MIT"
] | null | null | null |
StatTables/Python Data Generation/Chi-Squared.py
|
LukeBaal/PublicProjects
|
e9aae06b95189b69d642a6eaae53e3d970452e66
|
[
"MIT"
] | null | null | null |
StatTables/Python Data Generation/Chi-Squared.py
|
LukeBaal/PublicProjects
|
e9aae06b95189b69d642a6eaae53e3d970452e66
|
[
"MIT"
] | null | null | null |
import csv
data = open("Chi-Squared_Dist.csv", "w")
data.write("k, x, p\n")
with open('Book1.csv', 'r', newline='') as csvfile:
reader = csv.reader(csvfile)
file_list = list(reader)
del file_list[0]
for row in file_list:
del row[-1]
p = file_list[0]
del file_list[0]
p = p[1:]
k = []
for item in file_list:
k.append(item[0])
del item[0]
print(p)
print(file_list)
print(len(p))
print(len(file_list))
for i in range(len(file_list)):
    for l in range(len(p)):
        result = str(k[i]) + "," + str(file_list[i][l]) + "," + str(p[l])
        data.write(result + "\n")

data.close()
| 16.742857 | 73 | 0.59727 |
c117259b61bae755ab17eb57a4955ea6e41f2b35
| 719 |
py
|
Python
|
tobiko/common/_utils.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | 1 |
2022-01-11T20:50:06.000Z
|
2022-01-11T20:50:06.000Z
|
tobiko/common/_utils.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
tobiko/common/_utils.py
|
FedericoRessi/tobiko
|
188825386dc30197a37b7fe8be03318c73abbc48
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
def get_short_hostname(hostname):
return hostname.lower().split('.', 1)[0]
| 37.842105 | 78 | 0.730181 |
c8ac8d81f2153bded0f17da97fdfaaad0676fff5
| 1,295 |
py
|
Python
|
api/handler/statistic_handler.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
api/handler/statistic_handler.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
api/handler/statistic_handler.py
|
WeiShiwei/tornado_glearn
|
5e74bbaaee4d2b8c5abf3b60cffbe54694a3bc6f
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
u"""
Description: Report statistic information.
User: Jerry.Fang
Date: 14-01-06
"""
import traceback
from api.handler.base_handler import BaseHandler
import ujson
from statistic.heart_beat import HeartReqProcess
from statistic.stat_info_querier import StatInfoQuerier
class HeartRequestHandler(BaseHandler):
_label = 'HeartRequestHandler'
def get(self):
try:
result = {
u'heart_response': HeartReqProcess.get_response()
}
self._json_response(result)
except:
self.send_error()
self._app_logger.error(traceback.format_exc())
class QueryStatInfoHandler(BaseHandler):
_label = 'QueryStatInfoHandler'
def get(self):
try:
ind_list = dict()
ind_list[u'query_msg_count'] = self.get_argument('query_msg_count', default='')
ind_list[u'query_extract_rate'] = self.get_argument('query_extract_rate', default='')
ind_list[u'query_extract_time'] = self.get_argument('query_extract_time', default='')
result = dict()
StatInfoQuerier.query(ind_list, result)
self._json_response(result)
except:
self.send_error()
self._app_logger.error(traceback.format_exc())
| 30.833333 | 97 | 0.657143 |
4e44460d78954fa06a699eb293a4fe50d522616b
| 4,482 |
py
|
Python
|
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
alowet/spikeextractors
|
3e5a175a5523a3eb3578f96b0fffebee54108d9f
|
[
"MIT"
] | null | null | null |
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
alowet/spikeextractors
|
3e5a175a5523a3eb3578f96b0fffebee54108d9f
|
[
"MIT"
] | null | null | null |
spikeextractors/extractors/openephysextractors/openephysextractors.py
|
alowet/spikeextractors
|
3e5a175a5523a3eb3578f96b0fffebee54108d9f
|
[
"MIT"
] | null | null | null |
from spikeextractors import RecordingExtractor, SortingExtractor
from pathlib import Path
import numpy as np
from spikeextractors.extraction_tools import check_get_traces_args, check_valid_unit_id, check_get_ttl_args
try:
import pyopenephys
HAVE_OE = True
except ImportError:
HAVE_OE = False
class OpenEphysRecordingExtractor(RecordingExtractor):
extractor_name = 'OpenEphysRecording'
has_default_locations = False
installed = HAVE_OE # check at class level if installed or not
is_writable = False
mode = 'folder'
installation_mesg = "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n" # error message when not installed
def __init__(self, folder_path, *, experiment_id=0, recording_id=0, dtype='float'):
assert HAVE_OE, self.installation_mesg
assert dtype == 'int16' or 'float' in dtype, "'dtype' can be int16 (memory map) or 'float' (load into memory)"
RecordingExtractor.__init__(self)
self._recording_file = folder_path
self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
self._dtype = dtype
self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
'recording_id': recording_id, 'dtype': dtype}
def get_channel_ids(self):
return list(range(self._recording.analog_signals[0].signal.shape[0]))
def get_num_frames(self):
return self._recording.analog_signals[0].signal.shape[1]
def get_sampling_frequency(self):
return float(self._recording.sample_rate.rescale('Hz').magnitude)
@check_get_traces_args
def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):
if self._dtype == 'int16':
return self._recording.analog_signals[0].signal[channel_ids, start_frame:end_frame]
elif self._dtype == 'float':
return self._recording.analog_signals[0].signal[channel_ids, start_frame:end_frame] * \
self._recording.analog_signals[0].gain
@check_get_ttl_args
def get_ttl_events(self, start_frame=None, end_frame=None, channel_id=0):
channels = [np.unique(ev.channels)[0] for ev in self._recording.events]
assert channel_id in channels, f"Specified 'channel' not found. Available channels are {channels}"
ev = self._recording.events[channels.index(channel_id)]
ttl_frames = (ev.times.rescale("s") * self.get_sampling_frequency()).magnitude.astype(int)
ttl_states = np.sign(ev.channel_states)
ttl_valid_idxs = np.where((ttl_frames >= start_frame) & (ttl_frames < end_frame))[0]
return ttl_frames[ttl_valid_idxs], ttl_states[ttl_valid_idxs]
class OpenEphysSortingExtractor(SortingExtractor):
extractor_name = 'OpenEphysSortingExtractor'
installed = HAVE_OE # check at class level if installed or not
is_writable = False
mode = 'file'
installation_mesg = "To use the OpenEphys extractor, install pyopenephys: \n\n pip install pyopenephys\n\n" # error message when not installed
def __init__(self, folder_path, *, experiment_id=0, recording_id=0):
assert HAVE_OE, self.installation_mesg
SortingExtractor.__init__(self)
self._recording_file = folder_path
self._recording = pyopenephys.File(folder_path).experiments[experiment_id].recordings[recording_id]
self._spiketrains = self._recording.spiketrains
self._unit_ids = list([np.unique(st.clusters)[0] for st in self._spiketrains])
self._sampling_frequency = float(self._recording.sample_rate.rescale('Hz').magnitude)
self._kwargs = {'folder_path': str(Path(folder_path).absolute()), 'experiment_id': experiment_id,
'recording_id': recording_id}
def get_unit_ids(self):
return self._unit_ids
@check_valid_unit_id
def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None):
start_frame, end_frame = self._cast_start_end_frame(start_frame, end_frame)
if start_frame is None:
start_frame = 0
if end_frame is None:
end_frame = np.Inf
st = self._spiketrains[unit_id]
inds = np.where((start_frame <= (st.times * self._recording.sample_rate)) &
((st.times * self._recording.sample_rate) < end_frame))
return (st.times[inds] * self._recording.sample_rate).magnitude
| 47.680851 | 147 | 0.708835 |
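A brief usage sketch for the extractors defined above; the session folder path is a placeholder and pyopenephys must be installed, as the module's own installation message notes:

# Illustrative only: read an Open Ephys session with the extractors shown above.
from spikeextractors import OpenEphysRecordingExtractor, OpenEphysSortingExtractor

folder = '/data/openephys_session'  # placeholder path
recording = OpenEphysRecordingExtractor(folder, experiment_id=0, recording_id=0)
print(recording.get_channel_ids(), recording.get_sampling_frequency())
traces = recording.get_traces(channel_ids=[0, 1], start_frame=0, end_frame=1000)

sorting = OpenEphysSortingExtractor(folder)
for unit_id in sorting.get_unit_ids():
    spike_train = sorting.get_unit_spike_train(unit_id)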
2c5aa8fece7af8980af14ecd8425d7a680b2027c
| 3,542 |
py
|
Python
|
rapiduino/exceptions.py
|
samwedge/rapiduino
|
2e991fa568128ac46babda135e67280d99b46ce2
|
[
"Apache-2.0"
] | 3 |
2017-01-31T23:26:10.000Z
|
2022-02-28T16:36:15.000Z
|
rapiduino/exceptions.py
|
samwedge/rapiduino
|
2e991fa568128ac46babda135e67280d99b46ce2
|
[
"Apache-2.0"
] | null | null | null |
rapiduino/exceptions.py
|
samwedge/rapiduino
|
2e991fa568128ac46babda135e67280d99b46ce2
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Tuple
import rapiduino
class SerialConnectionSendDataError(Exception):
def __init__(self, n_bytes_intended: int, n_bytes_actual: int) -> None:
message = (
f"Transmitted {n_bytes_actual} bytes "
f"but expected to transmit {n_bytes_intended} bytes"
)
super().__init__(message)
class SerialConnectionReceiveDataError(Exception):
def __init__(self, n_bytes_intended: int, n_bytes_actual: int) -> None:
message = (
f"Received {n_bytes_actual} bytes "
f"but expected to receive {n_bytes_intended} bytes"
)
super().__init__(message)
class NotAnalogPinError(Exception):
def __init__(self, pin_no: int) -> None:
message = f"cannot complete operation as is_analog=False for pin {pin_no}"
super().__init__(message)
class NotPwmPinError(Exception):
def __init__(self, pin_no: int) -> None:
message = f"cannot complete operation as is_pwm=False for pin {pin_no}"
super().__init__(message)
class PinAlreadyRegisteredError(Exception):
def __init__(self, pin_no: int) -> None:
message = f"Pin {pin_no} is already registered on this board"
super().__init__(message)
class ComponentAlreadyRegisteredError(Exception):
def __init__(self) -> None:
message = "The component has already been registered to this board"
super().__init__(message)
class PinDoesNotExistError(Exception):
def __init__(self, pin_no: int) -> None:
message = f"The specified pin number {pin_no} does not exist on this board"
super().__init__(message)
class PinIsReservedForSerialCommsError(Exception):
def __init__(self, pin_no: int) -> None:
message = (
f"Pin {pin_no} is reserved for serial comms and cannot be used"
"for any other purpose"
)
super().__init__(message)
class ProtectedPinError(Exception):
"""The action cannot be completed because the specified pin is registered to a
component"""
def __init__(self, token: Optional[str]) -> None:
base_message = "Cannot perform this operation because the pin is registered"
if token is None:
message = f"{base_message} to a component"
else:
message = f"{base_message} to a different component"
super().__init__(message)
class ComponentNotRegisteredWithArduinoError(Exception):
def __init__(self) -> None:
message = "Device must be registered to an Arduino"
super().__init__(message)
class ComponentAlreadyRegisteredWithArduinoError(Exception):
def __init__(self) -> None:
message = "Device is already registered to an Arduino"
super().__init__(message)
class ArduinoSketchVersionIncompatibleError(Exception):
def __init__(
self, sketch_version: Tuple[int, ...], min_version: Tuple[int, int, int]
) -> None:
sketch_version_str = (
f"{sketch_version[0]}.{sketch_version[1]}.{sketch_version[2]}"
)
min_version_str = f"{min_version[0]}.{min_version[1]}.{min_version[2]}"
max_version_str = f"{min_version[0] + 1}.0.0"
message = (
f"Arduino sketch version {sketch_version_str} is incompatible with"
f" Rapiduino version {rapiduino.__version__}.\n"
"Please upload a compatible sketch version:"
f" Greater or equal to {min_version_str}, less than {max_version_str}"
)
super().__init__(message)
| 33.733333 | 84 | 0.662902 |
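A small sketch of how these exceptions surface to callers; the check_pin helper is hypothetical and only illustrates raising and catching one of the classes defined above:

# Hypothetical helper demonstrating the exception classes above.
from rapiduino.exceptions import PinDoesNotExistError

def check_pin(pin_no: int, available_pins: set) -> None:
    if pin_no not in available_pins:
        raise PinDoesNotExistError(pin_no)

try:
    check_pin(99, {0, 1, 2, 3})
except PinDoesNotExistError as exc:
    print(exc)  # "The specified pin number 99 does not exist on this board"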
25e668c2b419a56e877f4a7ab63d7ecca07ed8de
| 1,221 |
py
|
Python
|
label_studio/data_import/urls.py
|
cdpath/label-studio
|
68d0a05b335d090160ec45a253dd4ca386de536a
|
[
"Apache-2.0"
] | 3 |
2021-07-16T03:48:21.000Z
|
2022-01-10T04:58:25.000Z
|
label_studio/data_import/urls.py
|
cdpath/label-studio
|
68d0a05b335d090160ec45a253dd4ca386de536a
|
[
"Apache-2.0"
] | 6 |
2022-02-21T15:19:35.000Z
|
2022-03-07T15:25:16.000Z
|
label_studio/data_import/urls.py
|
cdpath/label-studio
|
68d0a05b335d090160ec45a253dd4ca386de536a
|
[
"Apache-2.0"
] | 1 |
2021-08-18T15:07:47.000Z
|
2021-08-18T15:07:47.000Z
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
from django.urls import path, include
from . import api
app_name = 'data_import'
_api_urlpatterns = [
path('file-upload/<int:pk>', api.FileUploadAPI.as_view(), name='file-upload-detail')
]
_api_projects_urlpatterns = [
# import api
path('<int:pk>/tasks/bulk/', api.TasksBulkCreateAPI.as_view(), name='project-tasks-bulk-upload'),
path('<int:pk>/import', api.ImportAPI.as_view(), name='project-import'),
path('<int:pk>/reimport', api.ReImportAPI.as_view(), name='project-reimport'),
path('<int:pk>/file-uploads', api.FileUploadListAPI.as_view(), name='project-file-upload-list'),
path('<int:pk>/', api.FileUploadListAPI.as_view(), name='project-file-upload-list'),
]
urlpatterns = [
path('api/import/', include((_api_urlpatterns, app_name), namespace='api')),
path('api/projects/', include((_api_projects_urlpatterns, app_name), namespace='api-projects')),
# special endpoints for serving imported files
path('data/upload/<str:filename>', api.UploadedFileResponse.as_view(), name='data-upload'),
]
| 40.7 | 168 | 0.714988 |
bb47c6596c058a2a62fb0414404a0fd43eebd29e
| 505 |
py
|
Python
|
DeepRL/utilities/__init__.py
|
Rafapia/Deep-Reinforcement-Learning-Algorithms-with-PyTorch
|
a0d584cbdc068b9e0d0f025df153103bcf16a6e0
|
[
"MIT"
] | null | null | null |
DeepRL/utilities/__init__.py
|
Rafapia/Deep-Reinforcement-Learning-Algorithms-with-PyTorch
|
a0d584cbdc068b9e0d0f025df153103bcf16a6e0
|
[
"MIT"
] | null | null | null |
DeepRL/utilities/__init__.py
|
Rafapia/Deep-Reinforcement-Learning-Algorithms-with-PyTorch
|
a0d584cbdc068b9e0d0f025df153103bcf16a6e0
|
[
"MIT"
] | null | null | null |
__all__ = ["data_structures", "grammar_algorithms",
"Deepmind_RMS_Prop", "Memory_Shaper", "OU_Noise", "Parallel_Experience_Generator", "Tensorboard", "Utility_Functions"]
from .data_structures import *
from .grammar_algorithms import *
from .Deepmind_RMS_Prop import DM_RMSprop
from .Memory_Shaper import Memory_Shaper
from .OU_Noise import OU_Noise
from .Parallel_Experience_Generator import Parallel_Experience_Generator
from .Tensorboard import Tensorboard
from .Utility_Functions import *
| 42.083333 | 129 | 0.819802 |
7d65aed92a1bfd61cef5b347be38932d46f915db
| 4,696 |
py
|
Python
|
cloudify_aws/ec2/resources/customer_gateway.py
|
cloudify-cosmo/cloudify-aws-plugin
|
958c84cd4b95ce739ecfab2ba944c380bd4fe836
|
[
"Apache-2.0"
] | 13 |
2015-05-28T23:21:05.000Z
|
2022-03-20T05:38:20.000Z
|
cloudify_aws/ec2/resources/customer_gateway.py
|
cloudify-cosmo/cloudify-aws-plugin
|
958c84cd4b95ce739ecfab2ba944c380bd4fe836
|
[
"Apache-2.0"
] | 49 |
2015-01-04T16:05:34.000Z
|
2022-03-27T11:35:13.000Z
|
cloudify_aws/ec2/resources/customer_gateway.py
|
cloudify-cosmo/cloudify-aws-plugin
|
958c84cd4b95ce739ecfab2ba944c380bd4fe836
|
[
"Apache-2.0"
] | 41 |
2015-01-21T17:16:05.000Z
|
2022-03-31T06:47:48.000Z
|
# Copyright (c) 2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EC2.Customer Gateway
~~~~~~~~~~~~~~
AWS EC2 Customer Gateway interface
"""
# Boto
from botocore.exceptions import ClientError, ParamValidationError
# Cloudify
from cloudify_aws.common import decorators, utils
from cloudify_aws.ec2 import EC2Base
RESOURCE_TYPE = 'EC2 Customer Gateway'
CUSTOMERGATEWAYS = 'CustomerGateways'
CUSTOMERGATEWAY_ID = 'CustomerGatewayId'
CUSTOMERGATEWAY_IDS = 'CustomerGatewayIds'
PUBLIC_IP = 'PublicIp'
ELASTICIP_TYPE = 'cloudify.nodes.aws.ec2.ElasticIP'
ELASTICIP_TYPE_DEPRECATED = 'cloudify.aws.nodes.ElasticIP'
class EC2CustomerGateway(EC2Base):
"""
EC2 Customer Gateway interface
"""
def __init__(self, ctx_node, resource_id=None, client=None, logger=None):
EC2Base.__init__(self, ctx_node, resource_id, client, logger)
self.type_name = RESOURCE_TYPE
@property
def properties(self):
"""Gets the properties of an external resource"""
params = {CUSTOMERGATEWAY_IDS: [self.resource_id]}
try:
resources = \
self.client.describe_customer_gateways(**params)
except (ClientError, ParamValidationError):
pass
else:
return resources.get(CUSTOMERGATEWAYS)[0] if resources else None
@property
def status(self):
"""Gets the status of an external resource"""
props = self.properties
if not props:
return None
return props['State']
def create(self, params):
"""
Create a new AWS EC2 Customer Gateway.
"""
return self.make_client_call('create_customer_gateway', params)
def delete(self, params=None):
"""
Deletes an existing AWS EC2 Customer Gateway.
"""
self.logger.debug('Deleting %s with parameters: %s'
% (self.type_name, params))
res = self.client.delete_customer_gateway(**params)
self.logger.debug('Response: %s' % res)
return res
@decorators.aws_resource(EC2CustomerGateway, resource_type=RESOURCE_TYPE)
def prepare(ctx, resource_config, **_):
"""Prepares an AWS EC2 Customer Gateway"""
# Save the parameters
ctx.instance.runtime_properties['resource_config'] = resource_config
@decorators.aws_resource(EC2CustomerGateway, RESOURCE_TYPE)
@decorators.wait_for_status(status_good=['available'],
status_pending=['pending'])
@decorators.tag_resources
def create(ctx, iface, resource_config, **_):
"""Creates an AWS EC2 Customer Gateway"""
# Create a copy of the resource config for clean manipulation.
params = \
dict() if not resource_config else resource_config.copy()
public_ip = params.get(PUBLIC_IP)
if not public_ip:
targ = \
utils.find_rel_by_node_type(ctx.instance, ELASTICIP_TYPE)
if targ:
public_ip = \
targ.target.instance.runtime_properties \
.get(ELASTICIP_TYPE_DEPRECATED)
params.update({PUBLIC_IP: public_ip})
# Actually create the resource
create_response = iface.create(params)['CustomerGateway']
ctx.instance.runtime_properties['create_response'] = \
utils.JsonCleanuper(create_response).to_dict()
utils.update_resource_id(ctx.instance,
create_response.get(CUSTOMERGATEWAY_ID))
@decorators.aws_resource(EC2CustomerGateway, RESOURCE_TYPE,
ignore_properties=True)
@decorators.wait_for_delete(status_deleted=['deleted'],
status_pending=['deleting'])
@decorators.untag_resources
def delete(iface, resource_config, **_):
"""Deletes an AWS EC2 Customer Gateway"""
# Create a copy of the resource config for clean manipulation.
params = \
dict() if not resource_config else resource_config.copy()
customer_gateway_id = params.get(CUSTOMERGATEWAY_ID)
if not customer_gateway_id:
customer_gateway_id = iface.resource_id
params.update({CUSTOMERGATEWAY_ID: customer_gateway_id})
iface.delete(params)
| 35.044776 | 77 | 0.68207 |
4a76b643aaa444a2ece81d418b5ae3c554e65f4d
| 15,308 |
py
|
Python
|
search/modules/mix_op.py
|
AhmadQasim/proxylessnas-dense
|
efaf150fc2a4eee2d8e9df664bad9eb4cf653c3e
|
[
"Apache-2.0"
] | null | null | null |
search/modules/mix_op.py
|
AhmadQasim/proxylessnas-dense
|
efaf150fc2a4eee2d8e9df664bad9eb4cf653c3e
|
[
"Apache-2.0"
] | null | null | null |
search/modules/mix_op.py
|
AhmadQasim/proxylessnas-dense
|
efaf150fc2a4eee2d8e9df664bad9eb4cf653c3e
|
[
"Apache-2.0"
] | 1 |
2021-05-12T04:49:28.000Z
|
2021-05-12T04:49:28.000Z
|
# ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware
# Han Cai, Ligeng Zhu, Song Han
# International Conference on Learning Representations (ICLR), 2019.
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from search.modules.layers import *
# returns the different possible ops that can be performed
def build_candidate_ops(candidate_ops, in_channels, out_channels, stride, ops_order, upsample=False, dims=2):
if candidate_ops is None:
raise ValueError('please specify a candidate set')
name2ops = {
'Identity': lambda in_C, out_C, S, u, d: IdentityLayer(in_C, out_C, ops_order=ops_order),
'Zero': lambda in_C, out_C, S, u, d: ZeroLayer(stride=S, upsample=u, dims=d),
}
# add MBConv layers
# create lambda functions for creating layers in place with the input and output channels and the required strides
# set the kernel size and the expand ratio i.e. the ratio by which to expand the output
name2ops.update({
'3x3_MBConv1': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 1),
'3x3_MBConv2': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 2),
'3x3_MBConv3': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 3),
'3x3_MBConv4': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 4),
'3x3_MBConv5': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 5),
'3x3_MBConv6': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 3, S, 6),
#######################################################################################
'5x5_MBConv1': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 1),
'5x5_MBConv2': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 2),
'5x5_MBConv3': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 3),
'5x5_MBConv4': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 4),
'5x5_MBConv5': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 5),
'5x5_MBConv6': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 5, S, 6),
#######################################################################################
'7x7_MBConv1': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 1),
'7x7_MBConv2': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 2),
'7x7_MBConv3': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 3),
'7x7_MBConv4': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 4),
'7x7_MBConv5': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 5),
'7x7_MBConv6': lambda in_C, out_C, S, u, d: MBInvertedConvLayer(in_C, out_C, 7, S, 6),
#######################################################################################
'3x3_ResConv': lambda in_C, out_C, S, u, d: OriginalResConvLayer(in_C, out_C, 3, S),
'5x5_ResConv': lambda in_C, out_C, S, u, d: OriginalResConvLayer(in_C, out_C, 5, S),
'7x7_ResConv': lambda in_C, out_C, S, u, d: OriginalResConvLayer(in_C, out_C, 7, S),
#######################################################################################
'1x1_TransConv': lambda in_C, out_C, S, u, d: TransConvLayer(in_C, out_C, 1, stride=S, dims=d),
'3x3_TransConv': lambda in_C, out_C, S, u, d: TransConvLayer(in_C, out_C, 3, stride=S, dims=d),
'5x5_TransConv': lambda in_C, out_C, S, u, d: TransConvLayer(in_C, out_C, 5, stride=S, dims=d),
'7x7_TransConv': lambda in_C, out_C, S, u, d: TransConvLayer(in_C, out_C, 7, stride=S, dims=d),
#######################################################################################
'1x1_Conv': lambda in_C, out_C, S, u, d: ConvLayer(in_C, out_C, 1, stride=1, upsample=u, dims=d),
'3x3_Conv': lambda in_C, out_C, S, u, d: ConvLayer(in_C, out_C, 3, stride=1, upsample=u, dims=d),
'5x5_Conv': lambda in_C, out_C, S, u, d: ConvLayer(in_C, out_C, 5, stride=1, upsample=u, dims=d)
})
return [
name2ops[name](in_channels, out_channels, stride, upsample, dims) for name in candidate_ops
]
class MixedEdge(MyModule):
# Following this idea, within an update step of the architecture parameters, we first sample two paths
# according to the multinomial distribution (p1, · · · , pN ) and mask all the other paths as if they do not
# exist. As such the number of candidates temporarily decrease from N to 2, while the path weights
# {pi} and binary gates {gi} are reset accordingly.
MODE = None # full, two, None, full_v2
def __init__(self, candidate_ops):
super(MixedEdge, self).__init__()
self.candidate_ops = nn.ModuleList(candidate_ops)
self.AP_path_alpha = Parameter(torch.Tensor(self.n_choices)) # architecture parameters
self.AP_path_wb = Parameter(torch.Tensor(self.n_choices)) # binary gates
self.active_index = [0]
self.inactive_index = None
self.log_prob = None
self.current_prob_over_ops = None
@property
def n_choices(self):
return len(self.candidate_ops)
@property
def probs_over_ops(self):
probs = F.softmax(self.AP_path_alpha, dim=0) # softmax to probability
return probs
@property
def chosen_index(self):
probs = self.probs_over_ops.data.cpu().numpy()
index = int(np.argmax(probs))
return index, probs[index]
# called in super_proxyless.py file in convert_to_normal_net function called after training has finished
@property
def chosen_op(self):
index, _ = self.chosen_index
return self.candidate_ops[index]
# no usage
@property
def random_op(self):
index = np.random.choice([_i for _i in range(self.n_choices)], 1)[0]
return self.candidate_ops[index]
# returns the entropy of the probabilities of the candidate ops, called in super_proxylessnas.py
# to find total entropy of the whole network over all candidate ops
# not used
def entropy(self, eps=1e-8):
probs = self.probs_over_ops
log_probs = torch.log(probs + eps)
entropy = - torch.sum(torch.mul(probs, log_probs))
return entropy
# checks if the active op is a zero layer
def is_zero_layer(self):
return self.active_op.is_zero_layer()
@property
def active_op(self):
""" assume only one path is active """
return self.candidate_ops[self.active_index[0]]
# sets the max probability op to active and others to inactive, called by set_chosen_op_active in super_proxyless.py
def set_chosen_op_active(self):
chosen_idx, _ = self.chosen_index
self.active_index = [chosen_idx]
self.inactive_index = [_i for _i in range(0, chosen_idx)] + \
[_i for _i in range(chosen_idx + 1, self.n_choices)]
def forward(self, x):
if MixedEdge.MODE == 'full' or MixedEdge.MODE == 'two':
output = 0
# take a product with the binarized weights of the op and add them up
# the output is found in this way
for _i in self.active_index:
oi = self.candidate_ops[_i](x)
output = output + self.AP_path_wb[_i] * oi
for _i in self.inactive_index:
oi = self.candidate_ops[_i](x)
output = output + self.AP_path_wb[_i] * oi.detach()
elif MixedEdge.MODE == 'full_v2':
def run_function(candidate_ops, active_id):
def forward(_x):
return candidate_ops[active_id](_x)
return forward
def backward_function(candidate_ops, active_id, binary_gates):
def backward(_x, _output, grad_output):
binary_grads = torch.zeros_like(binary_gates.data)
with torch.no_grad():
for k in range(len(candidate_ops)):
if k != active_id:
out_k = candidate_ops[k](_x.data)
else:
out_k = _output.data
grad_k = torch.sum(out_k * grad_output)
binary_grads[k] = grad_k
return binary_grads
return backward
output = ArchGradientFunction.apply(
x, self.AP_path_wb, run_function(self.candidate_ops, self.active_index[0]),
backward_function(self.candidate_ops, self.active_index[0], self.AP_path_wb)
)
else:
output = self.active_op(x)
return output
# simply for printing the current network ops
@property
def module_str(self):
chosen_index, probs = self.chosen_index
return 'Mix(%s, %.3f)' % (self.candidate_ops[chosen_index].module_str, probs)
@property
def config(self):
raise ValueError('not needed')
@staticmethod
def build_from_config(config):
raise ValueError('not needed')
def get_flops(self, x):
""" Only active paths taken into consideration when calculating FLOPs """
flops = 0
for i in self.active_index:
delta_flop, _ = self.candidate_ops[i].get_flops(x)
flops += delta_flop
return flops, self.forward(x)
""" """
def binarize(self):
""" prepare: active_index, inactive_index, AP_path_wb, log_prob (optional), current_prob_over_ops (optional) """
self.log_prob = None
# reset binary gates
self.AP_path_wb.data.zero_()
# binarize according to probs
probs = self.probs_over_ops
if MixedEdge.MODE == 'two':
# sample two ops according to `probs`
# this is done to reduce memory footprint, to simply use two ops
sample_op = torch.multinomial(probs.data, 2, replacement=False)
# select their weights and then use softmax to change them to probs
probs_slice = F.softmax(torch.stack([
self.AP_path_alpha[idx] for idx in sample_op
]), dim=0)
self.current_prob_over_ops = torch.zeros_like(probs)
for i, idx in enumerate(sample_op):
self.current_prob_over_ops[idx] = probs_slice[i]
# chose one to be active and the other to be inactive according to probs_slice
c = torch.multinomial(probs_slice.data, 1)[0] # 0 or 1
active_op = sample_op[c].item()
inactive_op = sample_op[1 - c].item()
self.active_index = [active_op]
self.inactive_index = [inactive_op]
# set binary gate
self.AP_path_wb.data[active_op] = 1.0
else:
sample = torch.multinomial(probs.data, 1)[0].item()
self.active_index = [sample]
self.inactive_index = [_i for _i in range(0, sample)] + \
[_i for _i in range(sample + 1, self.n_choices)]
self.log_prob = torch.log(probs[sample])
self.current_prob_over_ops = probs
# set binary gate
self.AP_path_wb.data[sample] = 1.0
# avoid over-regularization
for _i in range(self.n_choices):
for name, param in self.candidate_ops[_i].named_parameters():
param.grad = None
# basically called in the nas_manager.py during the validation phase in function gradient_step()
# after the cross entropy loss is calculated for the chosen path
def set_arch_param_grad(self):
binary_grads = self.AP_path_wb.grad.data
if self.active_op.is_zero_layer():
self.AP_path_alpha.grad = None
return
if self.AP_path_alpha.grad is None:
self.AP_path_alpha.grad = torch.zeros_like(self.AP_path_alpha.data)
if MixedEdge.MODE == 'two':
# concatenating the list of active and inactive index
involved_idx = self.active_index + self.inactive_index
probs_slice = F.softmax(torch.stack([
self.AP_path_alpha[idx] for idx in involved_idx
]), dim=0).data
# update the gradients of the weights according to equation 4 in the paper
# here again the probs_slice comes from the self.AP_path_alpha variable which contains the actual probs
# the AP_path_alpha is updated using the loss function
for i in range(2):
for j in range(2):
origin_i = involved_idx[i]
origin_j = involved_idx[j]
self.AP_path_alpha.grad.data[origin_i] += \
binary_grads[origin_j] * probs_slice[j] * (delta_ij(i, j) - probs_slice[i])
for _i, idx in enumerate(self.active_index):
self.active_index[_i] = (idx, self.AP_path_alpha.data[idx].item())
for _i, idx in enumerate(self.inactive_index):
self.inactive_index[_i] = (idx, self.AP_path_alpha.data[idx].item())
else:
probs = self.probs_over_ops.data
for i in range(self.n_choices):
for j in range(self.n_choices):
self.AP_path_alpha.grad.data[i] += binary_grads[j] * probs[j] * (delta_ij(i, j) - probs[i])
return
# Finally, as path weights are
# computed by applying softmax to the architecture parameters, we need to rescale the value of these
# two updated architecture parameters by multiplying a ratio to keep the path weights of unsampled
# paths unchanged.
def rescale_updated_arch_param(self):
if not isinstance(self.active_index[0], tuple):
assert self.active_op.is_zero_layer()
return
involved_idx = [idx for idx, _ in (self.active_index + self.inactive_index)]
old_alphas = [alpha for _, alpha in (self.active_index + self.inactive_index)]
new_alphas = [self.AP_path_alpha.data[idx] for idx in involved_idx]
offset = math.log(
sum([math.exp(alpha) for alpha in new_alphas]) / sum([math.exp(alpha) for alpha in old_alphas])
)
for idx in involved_idx:
self.AP_path_alpha.data[idx] -= offset
# only called in case of the full_v2 case
class ArchGradientFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, binary_gates, run_func, backward_func):
ctx.run_func = run_func
ctx.backward_func = backward_func
detached_x = detach_variable(x)
with torch.enable_grad():
output = run_func(detached_x)
ctx.save_for_backward(detached_x, output)
return output.data
@staticmethod
def backward(ctx, grad_output):
detached_x, output = ctx.saved_tensors
grad_x = torch.autograd.grad(output, detached_x, grad_output, only_inputs=True)
# compute gradients w.r.t. binary_gates
binary_grads = ctx.backward_func(detached_x.data, output.data, grad_output.data)
return grad_x[0], binary_grads, None, None
| 46.957055 | 120 | 0.608571 |
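The comments in MixedEdge describe sampling two candidate paths, renormalizing their weights, and setting a single binary gate. A standalone sketch of that idea, simplified and detached from the module above; the tensor names mirror AP_path_alpha / AP_path_wb but nothing here is the original implementation:

# Simplified illustration of the 'two' mode path sampling used by MixedEdge.binarize().
import torch
import torch.nn.functional as F

n_choices = 6
alpha = torch.randn(n_choices)                    # architecture parameters (AP_path_alpha)
probs = F.softmax(alpha, dim=0)                   # probability over all candidate ops

sample_op = torch.multinomial(probs, 2, replacement=False)   # keep only two paths
probs_slice = F.softmax(alpha[sample_op], dim=0)             # renormalize over the sampled pair
c = torch.multinomial(probs_slice, 1)[0]                     # pick which of the two is active
active_op = sample_op[c].item()
inactive_op = sample_op[1 - c].item()

binary_gates = torch.zeros(n_choices)             # binary gates (AP_path_wb)
binary_gates[active_op] = 1.0                     # only the active path contributes in forward()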
6a84a3b7ed9e138b9563a21a17b9a8b727518a23
| 994 |
py
|
Python
|
matury/2021pr_operon/zad43.py
|
bartekpacia/informatyka-frycz
|
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
|
[
"MIT"
] | 2 |
2021-03-06T22:09:44.000Z
|
2021-03-14T14:41:03.000Z
|
matury/2021pr_operon/zad43.py
|
bartekpacia/informatyka-frycz
|
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
|
[
"MIT"
] | 1 |
2020-03-25T15:42:47.000Z
|
2020-10-06T21:41:14.000Z
|
matury/2021pr_operon/zad43.py
|
bartekpacia/informatyka-frycz
|
6fdbbdea0c6b6a710378f22e90d467c9f91e64aa
|
[
"MIT"
] | null | null | null |
from typing import List
with open("dane.txt", "r") as f:
liczby = [int(line) for line in f.readlines()]
def remove(k: int, nums_list: List[int]):
i = k - 1
while i < len(nums_list):
del nums_list[i]
i = i + (k - 1)
def gen_lucky_numbers(high: int) -> List[int]:
lucky_nums = list(range(1, high + 1, 2))
i = 1
while True:
if i < len(lucky_nums):
remove_every_num = lucky_nums[i]
if remove_every_num > len(lucky_nums):
return lucky_nums
else:
return lucky_nums
remove(remove_every_num, lucky_nums)
i += 1
def is_prime(num: int) -> bool:
    if num < 2:
        return False
    for i in range(2, (num // 2) + 1):
if num % i == 0:
return False
return True
lucky_numbers = set(gen_lucky_numbers(10_000))
both_prime_and_lucky = 0
for liczba in liczby:
if liczba in lucky_numbers and is_prime(liczba):
both_prime_and_lucky += 1
print(f"{both_prime_and_lucky=}")
| 21.148936 | 52 | 0.586519 |
52bacc1c1dbdced546629cd966121877e98f144c
| 141 |
py
|
Python
|
kleat/__init__.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
kleat/__init__.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
kleat/__init__.py
|
zyxue/kleat3
|
861b02797937eea51e99f9c29d195fb3e7dea376
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for kleat3."""
__author__ = """Zhuyi Xue"""
__email__ = '[email protected]'
__version__ = '3.0.0'
| 17.625 | 35 | 0.609929 |
b3cc237cd091dbbf845559bae7f5c5d8c66223bb
| 7,929 |
py
|
Python
|
monailabel/datastore/utils/convert.py
|
IntroAI-termproject/MONAILabel
|
6a0fcc797e24aff1a1582088bae71973b2b6582e
|
[
"Apache-2.0"
] | 214 |
2021-04-30T15:37:47.000Z
|
2022-03-27T12:38:58.000Z
|
monailabel/datastore/utils/convert.py
|
IntroAI-termproject/MONAILabel
|
6a0fcc797e24aff1a1582088bae71973b2b6582e
|
[
"Apache-2.0"
] | 325 |
2021-04-30T15:59:16.000Z
|
2022-03-31T19:39:38.000Z
|
monailabel/datastore/utils/convert.py
|
IntroAI-termproject/MONAILabel
|
6a0fcc797e24aff1a1582088bae71973b2b6582e
|
[
"Apache-2.0"
] | 50 |
2021-05-05T13:57:45.000Z
|
2022-03-16T21:01:25.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pathlib
import shutil
import tempfile
import time
import numpy as np
import pydicom_seg
import SimpleITK
from monai.data import write_nifti
from monai.transforms import LoadImage
from pydicom.filereader import dcmread
from monailabel.datastore.utils.colors import GENERIC_ANATOMY_COLORS
from monailabel.transform.writer import write_itk
from monailabel.utils.others.generic import run_command
logger = logging.getLogger(__name__)
def dicom_to_nifti(series_dir, is_seg=False):
start = time.time()
if is_seg:
output_file = itk_dicom_seg_to_image(series_dir)
else:
# https://simpleitk.readthedocs.io/en/master/link_DicomConvert_docs.html
if os.path.isdir(series_dir) and len(os.listdir(series_dir)) > 1:
reader = SimpleITK.ImageSeriesReader()
dicom_names = reader.GetGDCMSeriesFileNames(series_dir)
reader.SetFileNames(dicom_names)
image = reader.Execute()
else:
filename = (
series_dir if not os.path.isdir(series_dir) else os.path.join(series_dir, os.listdir(series_dir)[0])
)
file_reader = SimpleITK.ImageFileReader()
file_reader.SetImageIO("GDCMImageIO")
file_reader.SetFileName(filename)
image = file_reader.Execute()
logger.info(f"Image size: {image.GetSize()}")
output_file = tempfile.NamedTemporaryFile(suffix=".nii.gz").name
SimpleITK.WriteImage(image, output_file)
logger.info(f"dicom_to_nifti latency : {time.time() - start} (sec)")
return output_file
def binary_to_image(reference_image, label, dtype=np.uint16, file_ext=".nii.gz", use_itk=True):
start = time.time()
image_np, meta_dict = LoadImage()(reference_image)
label_np = np.fromfile(label, dtype=dtype)
logger.info(f"Image: {image_np.shape}")
logger.info(f"Label: {label_np.shape}")
label_np = label_np.reshape(image_np.shape, order="F")
logger.info(f"Label (reshape): {label_np.shape}")
output_file = tempfile.NamedTemporaryFile(suffix=file_ext).name
affine = meta_dict.get("affine")
if use_itk:
write_itk(label_np, output_file, affine=affine, dtype=None, compress=True)
else:
write_nifti(label_np, output_file, affine=affine)
logger.info(f"binary_to_image latency : {time.time() - start} (sec)")
return output_file
def nifti_to_dicom_seg(series_dir, label, label_info, file_ext="*", use_itk=True):
start = time.time()
label_np, meta_dict = LoadImage()(label)
    unique_labels = np.unique(label_np.flatten()).astype(int)
unique_labels = unique_labels[unique_labels != 0]
segment_attributes = []
for i, idx in enumerate(unique_labels):
info = label_info[i] if label_info and i < len(label_info) else {}
name = info.get("name", "unknown")
description = info.get("description", "Unknown")
rgb = list(info.get("color", GENERIC_ANATOMY_COLORS.get(name, (255, 0, 0))))[0:3]
rgb = [int(x) for x in rgb]
logger.info(f"{i} => {idx} => {name}")
segment_attribute = info.get(
"segmentAttribute",
{
"labelID": int(idx),
"SegmentLabel": name,
"SegmentDescription": description,
"SegmentAlgorithmType": "AUTOMATIC",
"SegmentAlgorithmName": "MONAILABEL",
"SegmentedPropertyCategoryCodeSequence": {
"CodeValue": "123037004",
"CodingSchemeDesignator": "SCT",
"CodeMeaning": "Anatomical Structure",
},
"SegmentedPropertyTypeCodeSequence": {
"CodeValue": "78961009",
"CodingSchemeDesignator": "SCT",
"CodeMeaning": name,
},
"recommendedDisplayRGBValue": rgb,
},
)
segment_attributes.append(segment_attribute)
template = {
"ContentCreatorName": "Reader1",
"ClinicalTrialSeriesID": "Session1",
"ClinicalTrialTimePointID": "1",
"SeriesDescription": "Segmentation",
"SeriesNumber": "300",
"InstanceNumber": "1",
"segmentAttributes": [segment_attributes],
"ContentLabel": "SEGMENTATION",
"ContentDescription": "MONAI Label - Image segmentation",
"ClinicalTrialCoordinatingCenterName": "MONAI",
"BodyPartExamined": "",
}
logger.info(json.dumps(template, indent=2))
if not segment_attributes:
logger.error("Missing Attributes/Empty Label provided")
return None
if use_itk:
output_file = itk_image_to_dicom_seg(label, series_dir, template)
else:
template = pydicom_seg.template.from_dcmqi_metainfo(template)
writer = pydicom_seg.MultiClassWriter(
template=template,
inplane_cropping=False,
skip_empty_slices=False,
skip_missing_segment=False,
)
# Read source Images
series_dir = pathlib.Path(series_dir)
image_files = series_dir.glob(file_ext)
image_datasets = [dcmread(str(f), stop_before_pixels=True) for f in image_files]
logger.info(f"Total Source Images: {len(image_datasets)}")
mask = SimpleITK.ReadImage(label)
mask = SimpleITK.Cast(mask, SimpleITK.sitkUInt16)
output_file = tempfile.NamedTemporaryFile(suffix=".dcm").name
dcm = writer.write(mask, image_datasets)
dcm.save_as(output_file)
logger.info(f"nifti_to_dicom_seg latency : {time.time() - start} (sec)")
return output_file
def itk_image_to_dicom_seg(label, series_dir, template):
output_file = tempfile.NamedTemporaryFile(suffix=".dcm").name
meta_data = tempfile.NamedTemporaryFile(suffix=".json").name
with open(meta_data, "w") as fp:
json.dump(template, fp)
command = "itkimage2segimage"
args = [
"--inputImageList",
label,
"--inputDICOMDirectory",
series_dir,
"--outputDICOM",
output_file,
"--inputMetadata",
meta_data,
]
run_command(command, args)
os.unlink(meta_data)
return output_file
def itk_dicom_seg_to_image(label, output_type="nifti"):
# TODO:: Currently supports only one file
filename = label if not os.path.isdir(label) else os.path.join(label, os.listdir(label)[0])
with tempfile.TemporaryDirectory() as output_dir:
command = "segimage2itkimage"
args = [
"--inputDICOM",
filename,
"--outputType",
output_type,
"--prefix",
"segment",
"--outputDirectory",
output_dir,
]
run_command(command, args)
output_files = [f for f in os.listdir(output_dir) if f.startswith("segment") and f.endswith(".nii.gz")]
if not output_files:
logger.warning(f"Failed to convert DICOM-SEG {label} to NIFTI")
return None
result_file = os.path.join(output_dir, output_files[0])
logger.info(f"Result/Output (NII) File: {result_file}")
output_file = tempfile.NamedTemporaryFile(suffix=".nii.gz").name
shutil.move(result_file, output_file)
return output_file
| 35.716216 | 116 | 0.646614 |
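A short usage sketch of the conversion helpers above; the paths and the single label entry are placeholders, and the dcmqi command-line tools must be available when use_itk=True, as the module's run_command calls imply:

# Illustrative round trip with the helpers defined above; all paths are placeholders.
from monailabel.datastore.utils.convert import dicom_to_nifti, nifti_to_dicom_seg

image_nii = dicom_to_nifti('/data/dicom/series_001')            # DICOM series -> NIfTI

label_info = [{"name": "spleen", "color": (0, 255, 0)}]
seg_dcm = nifti_to_dicom_seg('/data/dicom/series_001',
                             '/data/labels/series_001_seg.nii.gz',
                             label_info, use_itk=True)           # NIfTI label -> DICOM-SEG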
7d6f30f92534c4de5abfcb2d7a3709bff898a739
| 311 |
py
|
Python
|
conanInstall.py
|
odant/test_versions_conan_packages
|
b0f91f31cf277f0854c8fd28ed3aa4ce1109ad15
|
[
"MIT"
] | null | null | null |
conanInstall.py
|
odant/test_versions_conan_packages
|
b0f91f31cf277f0854c8fd28ed3aa4ce1109ad15
|
[
"MIT"
] | null | null | null |
conanInstall.py
|
odant/test_versions_conan_packages
|
b0f91f31cf277f0854c8fd28ed3aa4ce1109ad15
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from conanRunner import conanRunner
def conanInstall(conanfile, installFolder):
print("\n")
conanfile = str(conanfile)
installFolder = str(installFolder)
args = ["install", conanfile, "--install-folder", installFolder]
for s in conanRunner(args):
print(s)
| 25.916667 | 68 | 0.70418 |
8f1ba727572a6d53d9beb79c7e80939f8d8a4ac8
| 553 |
py
|
Python
|
2-Medium/numsSameConsecDiff.py
|
Sma-Das/Leetcode
|
6f9b8f069e2ef198408abd6780fd0697a8bebada
|
[
"MIT"
] | null | null | null |
2-Medium/numsSameConsecDiff.py
|
Sma-Das/Leetcode
|
6f9b8f069e2ef198408abd6780fd0697a8bebada
|
[
"MIT"
] | null | null | null |
2-Medium/numsSameConsecDiff.py
|
Sma-Das/Leetcode
|
6f9b8f069e2ef198408abd6780fd0697a8bebada
|
[
"MIT"
] | null | null | null |
from functools import cache
def numsSameConsecDiff(n, k):
@cache
def builder(curr, rem, delta, string=""):
if not 0 <= curr <= 9:
return ""
elif not rem:
return string
return builder(curr+delta, rem-1, delta, string + str(curr)) + builder(curr-delta, rem-1, -delta, string + str(curr))
res = set()
for i in range(1, 10):
v = builder(i, n, k)
[res.add(v[c:c+n]) for c in range(0, len(v), n)]
return res
if __name__ == '__main__':
print(numsSameConsecDiff(8, 1))
| 26.333333 | 125 | 0.562387 |
1fb266c18d648c6ac8f7016bc6b5948cf8a44bf3
| 20,274 |
py
|
Python
|
intersight/models/storage_flex_util_virtual_drive.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/storage_flex_util_virtual_drive.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/storage_flex_util_virtual_drive.py
|
ategaw-cisco/intersight-python
|
9d6476620507281b1dc358e29ac452d56081bbb0
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StorageFlexUtilVirtualDrive(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account_moid': 'str',
'ancestors': 'list[MoBaseMoRef]',
'create_time': 'datetime',
'mod_time': 'datetime',
'moid': 'str',
'object_type': 'str',
'owners': 'list[str]',
'parent': 'MoBaseMoRef',
'tags': 'list[MoTag]',
'version_context': 'MoVersionContext',
'device_mo_id': 'str',
'dn': 'str',
'rn': 'str',
'drive_status': 'str',
'drive_type': 'str',
'partition_id': 'str',
'partition_name': 'str',
'registered_device': 'AssetDeviceRegistrationRef',
'resident_image': 'str',
'size': 'str',
'storage_flex_util_controller': 'StorageFlexUtilControllerRef',
'virtual_drive': 'str'
}
attribute_map = {
'account_moid': 'AccountMoid',
'ancestors': 'Ancestors',
'create_time': 'CreateTime',
'mod_time': 'ModTime',
'moid': 'Moid',
'object_type': 'ObjectType',
'owners': 'Owners',
'parent': 'Parent',
'tags': 'Tags',
'version_context': 'VersionContext',
'device_mo_id': 'DeviceMoId',
'dn': 'Dn',
'rn': 'Rn',
'drive_status': 'DriveStatus',
'drive_type': 'DriveType',
'partition_id': 'PartitionId',
'partition_name': 'PartitionName',
'registered_device': 'RegisteredDevice',
'resident_image': 'ResidentImage',
'size': 'Size',
'storage_flex_util_controller': 'StorageFlexUtilController',
'virtual_drive': 'VirtualDrive'
}
def __init__(self, account_moid=None, ancestors=None, create_time=None, mod_time=None, moid=None, object_type=None, owners=None, parent=None, tags=None, version_context=None, device_mo_id=None, dn=None, rn=None, drive_status=None, drive_type=None, partition_id=None, partition_name=None, registered_device=None, resident_image=None, size=None, storage_flex_util_controller=None, virtual_drive=None):
"""
StorageFlexUtilVirtualDrive - a model defined in Swagger
"""
self._account_moid = None
self._ancestors = None
self._create_time = None
self._mod_time = None
self._moid = None
self._object_type = None
self._owners = None
self._parent = None
self._tags = None
self._version_context = None
self._device_mo_id = None
self._dn = None
self._rn = None
self._drive_status = None
self._drive_type = None
self._partition_id = None
self._partition_name = None
self._registered_device = None
self._resident_image = None
self._size = None
self._storage_flex_util_controller = None
self._virtual_drive = None
if account_moid is not None:
self.account_moid = account_moid
if ancestors is not None:
self.ancestors = ancestors
if create_time is not None:
self.create_time = create_time
if mod_time is not None:
self.mod_time = mod_time
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
if owners is not None:
self.owners = owners
if parent is not None:
self.parent = parent
if tags is not None:
self.tags = tags
if version_context is not None:
self.version_context = version_context
if device_mo_id is not None:
self.device_mo_id = device_mo_id
if dn is not None:
self.dn = dn
if rn is not None:
self.rn = rn
if drive_status is not None:
self.drive_status = drive_status
if drive_type is not None:
self.drive_type = drive_type
if partition_id is not None:
self.partition_id = partition_id
if partition_name is not None:
self.partition_name = partition_name
if registered_device is not None:
self.registered_device = registered_device
if resident_image is not None:
self.resident_image = resident_image
if size is not None:
self.size = size
if storage_flex_util_controller is not None:
self.storage_flex_util_controller = storage_flex_util_controller
if virtual_drive is not None:
self.virtual_drive = virtual_drive
@property
def account_moid(self):
"""
Gets the account_moid of this StorageFlexUtilVirtualDrive.
The Account ID for this managed object.
:return: The account_moid of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._account_moid
@account_moid.setter
def account_moid(self, account_moid):
"""
Sets the account_moid of this StorageFlexUtilVirtualDrive.
The Account ID for this managed object.
:param account_moid: The account_moid of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._account_moid = account_moid
@property
def ancestors(self):
"""
Gets the ancestors of this StorageFlexUtilVirtualDrive.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:return: The ancestors of this StorageFlexUtilVirtualDrive.
:rtype: list[MoBaseMoRef]
"""
return self._ancestors
@ancestors.setter
def ancestors(self, ancestors):
"""
Sets the ancestors of this StorageFlexUtilVirtualDrive.
Ancestors is an array containing the MO references of the ancestors in the object containment hierarchy.
:param ancestors: The ancestors of this StorageFlexUtilVirtualDrive.
:type: list[MoBaseMoRef]
"""
self._ancestors = ancestors
@property
def create_time(self):
"""
Gets the create_time of this StorageFlexUtilVirtualDrive.
The time when this managed object was created.
:return: The create_time of this StorageFlexUtilVirtualDrive.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""
Sets the create_time of this StorageFlexUtilVirtualDrive.
The time when this managed object was created.
:param create_time: The create_time of this StorageFlexUtilVirtualDrive.
:type: datetime
"""
self._create_time = create_time
@property
def mod_time(self):
"""
Gets the mod_time of this StorageFlexUtilVirtualDrive.
The time when this managed object was last modified.
:return: The mod_time of this StorageFlexUtilVirtualDrive.
:rtype: datetime
"""
return self._mod_time
@mod_time.setter
def mod_time(self, mod_time):
"""
Sets the mod_time of this StorageFlexUtilVirtualDrive.
The time when this managed object was last modified.
:param mod_time: The mod_time of this StorageFlexUtilVirtualDrive.
:type: datetime
"""
self._mod_time = mod_time
@property
def moid(self):
"""
Gets the moid of this StorageFlexUtilVirtualDrive.
A unique identifier of this Managed Object instance.
:return: The moid of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this StorageFlexUtilVirtualDrive.
A unique identifier of this Managed Object instance.
:param moid: The moid of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._moid = moid
@property
def object_type(self):
"""
Gets the object_type of this StorageFlexUtilVirtualDrive.
The fully-qualified type of this managed object, e.g. the class name.
:return: The object_type of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this StorageFlexUtilVirtualDrive.
The fully-qualified type of this managed object, e.g. the class name.
:param object_type: The object_type of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._object_type = object_type
@property
def owners(self):
"""
Gets the owners of this StorageFlexUtilVirtualDrive.
An array of owners which represent effective ownership of this object.
:return: The owners of this StorageFlexUtilVirtualDrive.
:rtype: list[str]
"""
return self._owners
@owners.setter
def owners(self, owners):
"""
Sets the owners of this StorageFlexUtilVirtualDrive.
An array of owners which represent effective ownership of this object.
:param owners: The owners of this StorageFlexUtilVirtualDrive.
:type: list[str]
"""
self._owners = owners
@property
def parent(self):
"""
Gets the parent of this StorageFlexUtilVirtualDrive.
The direct ancestor of this managed object in the containment hierarchy.
:return: The parent of this StorageFlexUtilVirtualDrive.
:rtype: MoBaseMoRef
"""
return self._parent
@parent.setter
def parent(self, parent):
"""
Sets the parent of this StorageFlexUtilVirtualDrive.
The direct ancestor of this managed object in the containment hierarchy.
:param parent: The parent of this StorageFlexUtilVirtualDrive.
:type: MoBaseMoRef
"""
self._parent = parent
@property
def tags(self):
"""
Gets the tags of this StorageFlexUtilVirtualDrive.
An array of tags, which allow to add key, value meta-data to managed objects.
:return: The tags of this StorageFlexUtilVirtualDrive.
:rtype: list[MoTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""
Sets the tags of this StorageFlexUtilVirtualDrive.
An array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this StorageFlexUtilVirtualDrive.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
"""
Gets the version_context of this StorageFlexUtilVirtualDrive.
The versioning info for this managed object
:return: The version_context of this StorageFlexUtilVirtualDrive.
:rtype: MoVersionContext
"""
return self._version_context
@version_context.setter
def version_context(self, version_context):
"""
Sets the version_context of this StorageFlexUtilVirtualDrive.
The versioning info for this managed object
:param version_context: The version_context of this StorageFlexUtilVirtualDrive.
:type: MoVersionContext
"""
self._version_context = version_context
@property
def device_mo_id(self):
"""
Gets the device_mo_id of this StorageFlexUtilVirtualDrive.
:return: The device_mo_id of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._device_mo_id
@device_mo_id.setter
def device_mo_id(self, device_mo_id):
"""
Sets the device_mo_id of this StorageFlexUtilVirtualDrive.
:param device_mo_id: The device_mo_id of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._device_mo_id = device_mo_id
@property
def dn(self):
"""
Gets the dn of this StorageFlexUtilVirtualDrive.
:return: The dn of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._dn
@dn.setter
def dn(self, dn):
"""
Sets the dn of this StorageFlexUtilVirtualDrive.
:param dn: The dn of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._dn = dn
@property
def rn(self):
"""
Gets the rn of this StorageFlexUtilVirtualDrive.
:return: The rn of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._rn
@rn.setter
def rn(self, rn):
"""
Sets the rn of this StorageFlexUtilVirtualDrive.
:param rn: The rn of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._rn = rn
@property
def drive_status(self):
"""
Gets the drive_status of this StorageFlexUtilVirtualDrive.
:return: The drive_status of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._drive_status
@drive_status.setter
def drive_status(self, drive_status):
"""
Sets the drive_status of this StorageFlexUtilVirtualDrive.
:param drive_status: The drive_status of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._drive_status = drive_status
@property
def drive_type(self):
"""
Gets the drive_type of this StorageFlexUtilVirtualDrive.
:return: The drive_type of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._drive_type
@drive_type.setter
def drive_type(self, drive_type):
"""
Sets the drive_type of this StorageFlexUtilVirtualDrive.
:param drive_type: The drive_type of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._drive_type = drive_type
@property
def partition_id(self):
"""
Gets the partition_id of this StorageFlexUtilVirtualDrive.
:return: The partition_id of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._partition_id
@partition_id.setter
def partition_id(self, partition_id):
"""
Sets the partition_id of this StorageFlexUtilVirtualDrive.
:param partition_id: The partition_id of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._partition_id = partition_id
@property
def partition_name(self):
"""
Gets the partition_name of this StorageFlexUtilVirtualDrive.
:return: The partition_name of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._partition_name
@partition_name.setter
def partition_name(self, partition_name):
"""
Sets the partition_name of this StorageFlexUtilVirtualDrive.
:param partition_name: The partition_name of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._partition_name = partition_name
@property
def registered_device(self):
"""
Gets the registered_device of this StorageFlexUtilVirtualDrive.
:return: The registered_device of this StorageFlexUtilVirtualDrive.
:rtype: AssetDeviceRegistrationRef
"""
return self._registered_device
@registered_device.setter
def registered_device(self, registered_device):
"""
Sets the registered_device of this StorageFlexUtilVirtualDrive.
:param registered_device: The registered_device of this StorageFlexUtilVirtualDrive.
:type: AssetDeviceRegistrationRef
"""
self._registered_device = registered_device
@property
def resident_image(self):
"""
Gets the resident_image of this StorageFlexUtilVirtualDrive.
:return: The resident_image of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._resident_image
@resident_image.setter
def resident_image(self, resident_image):
"""
Sets the resident_image of this StorageFlexUtilVirtualDrive.
:param resident_image: The resident_image of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._resident_image = resident_image
@property
def size(self):
"""
Gets the size of this StorageFlexUtilVirtualDrive.
:return: The size of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._size
@size.setter
def size(self, size):
"""
Sets the size of this StorageFlexUtilVirtualDrive.
:param size: The size of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._size = size
@property
def storage_flex_util_controller(self):
"""
Gets the storage_flex_util_controller of this StorageFlexUtilVirtualDrive.
:return: The storage_flex_util_controller of this StorageFlexUtilVirtualDrive.
:rtype: StorageFlexUtilControllerRef
"""
return self._storage_flex_util_controller
@storage_flex_util_controller.setter
def storage_flex_util_controller(self, storage_flex_util_controller):
"""
Sets the storage_flex_util_controller of this StorageFlexUtilVirtualDrive.
:param storage_flex_util_controller: The storage_flex_util_controller of this StorageFlexUtilVirtualDrive.
:type: StorageFlexUtilControllerRef
"""
self._storage_flex_util_controller = storage_flex_util_controller
@property
def virtual_drive(self):
"""
Gets the virtual_drive of this StorageFlexUtilVirtualDrive.
:return: The virtual_drive of this StorageFlexUtilVirtualDrive.
:rtype: str
"""
return self._virtual_drive
@virtual_drive.setter
def virtual_drive(self, virtual_drive):
"""
Sets the virtual_drive of this StorageFlexUtilVirtualDrive.
:param virtual_drive: The virtual_drive of this StorageFlexUtilVirtualDrive.
:type: str
"""
self._virtual_drive = virtual_drive
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StorageFlexUtilVirtualDrive):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 29.382609 | 403 | 0.623705 |
c732c12f87e3aa35753087b68822ce20532f469c
| 6,838 |
py
|
Python
|
sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_security_solutions_reference_data_operations.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 1 |
2020-03-05T18:10:35.000Z
|
2020-03-05T18:10:35.000Z
|
sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_security_solutions_reference_data_operations.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | 2 |
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_security_solutions_reference_data_operations.py
|
adewaleo/azure-sdk-for-python
|
169457edbea5e3c5557246cfcf8bd635d528bae4
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SecuritySolutionsReferenceDataOperations:
"""SecuritySolutionsReferenceDataOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
**kwargs
) -> "models.SecuritySolutionsReferenceDataList":
"""Gets a list of all supported Security Solutions for the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecuritySolutionsReferenceDataList, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.SecuritySolutionsReferenceDataList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecuritySolutionsReferenceDataList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecuritySolutionsReferenceDataList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/securitySolutionsReferenceData'} # type: ignore
async def list_by_home_region(
self,
**kwargs
) -> "models.SecuritySolutionsReferenceDataList":
"""Gets list of all supported Security Solutions for subscription and location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SecuritySolutionsReferenceDataList, or the result of cls(response)
:rtype: ~azure.mgmt.security.models.SecuritySolutionsReferenceDataList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.SecuritySolutionsReferenceDataList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-01"
accept = "application/json"
# Construct URL
url = self.list_by_home_region.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
'ascLocation': self._serialize.url("self._config.asc_location", self._config.asc_location, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SecuritySolutionsReferenceDataList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_home_region.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/locations/{ascLocation}/securitySolutionsReferenceData'} # type: ignore
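# --- Usage sketch (illustrative, not part of the generated code) -------------
# The class docstring above notes that this operations class is not meant to be
# instantiated directly; a SecurityCenter client creates it and exposes it as an
# attribute. The credential type, constructor arguments and attribute name below
# are assumptions for the example, not guaranteed by this module.
async def _example_list_security_solutions(subscription_id: str, asc_location: str):
    from azure.identity.aio import DefaultAzureCredential  # assumed dependency
    from azure.mgmt.security.aio import SecurityCenter
    async with SecurityCenter(DefaultAzureCredential(), subscription_id, asc_location) as client:
        # list() returns a SecuritySolutionsReferenceDataList; .value holds the entries.
        data = await client.security_solutions_reference_data.list()
        return [item.name for item in data.value]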
| 47.818182 | 184 | 0.686751 |
09bfb25c937cc010066059dc97724cf30acd4bc1
| 3,713 |
py
|
Python
|
sdks/python/http_client/v1/polyaxon_sdk/models/v1_parallel_kind.py
|
jjasonkal/polyaxon
|
8454b29b2b971b965de8a7bf63afdd48f07d6d53
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_parallel_kind.py
|
jjasonkal/polyaxon
|
8454b29b2b971b965de8a7bf63afdd48f07d6d53
|
[
"Apache-2.0"
] | null | null | null |
sdks/python/http_client/v1/polyaxon_sdk/models/v1_parallel_kind.py
|
jjasonkal/polyaxon
|
8454b29b2b971b965de8a7bf63afdd48f07d6d53
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.0.89
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1ParallelKind(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
RANDOM = "random"
GRID = "grid"
HYPERBAND = "hyperband"
BAYES = "bayes"
HYPEROPT = "hyperopt"
ITERATIVE = "iterative"
MAPPING = "mapping"
allowable_values = [
RANDOM,
GRID,
HYPERBAND,
BAYES,
HYPEROPT,
ITERATIVE,
MAPPING,
] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {}
attribute_map = {}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""V1ParallelKind - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ParallelKind):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ParallelKind):
return True
return self.to_dict() != other.to_dict()
| 28.343511 | 85 | 0.590897 |
12a353f670f36552a74937a8d7515f410d0e48af
| 5,059 |
py
|
Python
|
tests/bdd/features/volume/nexus-info/test_feature.py
|
Abhinandan-Purkait-Bolt/mayastor-control-plane
|
ae4b39b42cc54bf8520f0256568e5d8635eb4f39
|
[
"Apache-2.0"
] | null | null | null |
tests/bdd/features/volume/nexus-info/test_feature.py
|
Abhinandan-Purkait-Bolt/mayastor-control-plane
|
ae4b39b42cc54bf8520f0256568e5d8635eb4f39
|
[
"Apache-2.0"
] | 1 |
2022-03-31T10:26:45.000Z
|
2022-03-31T10:26:45.000Z
|
tests/bdd/features/volume/nexus-info/test_feature.py
|
Abhinandan-Purkait-Bolt/mayastor-control-plane
|
ae4b39b42cc54bf8520f0256568e5d8635eb4f39
|
[
"Apache-2.0"
] | null | null | null |
"""Persistent Nexus Info feature tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
import pytest
from common.deployer import Deployer
from common.apiclient import ApiClient
from common.etcd import Etcd
from openapi.model.create_pool_body import CreatePoolBody
from openapi.model.create_volume_body import CreateVolumeBody
from openapi.model.protocol import Protocol
from openapi.model.volume_policy import VolumePolicy
from openapi.exceptions import NotFoundException
POOL_UUID = "4cc6ee64-7232-497d-a26f-38284a444980"
VOLUME_UUID = "5cd5378e-3f05-47f1-a830-a0f5873a1449"
NODE_NAME = "mayastor-1"
VOLUME_CTX_KEY = "volume"
VOLUME_CTX_KEY_OLD = "volume-old"
VOLUME_SIZE = 10485761
ETCD_CLIENT = Etcd()
# Fixture used to pass the volume context between test steps.
@pytest.fixture(scope="function")
def volume_ctx():
return {}
@pytest.fixture(autouse=True, scope="module")
def init():
Deployer.start(1)
ApiClient.pools_api().put_node_pool(
NODE_NAME, POOL_UUID, CreatePoolBody(["malloc:///disk?size_mb=50"])
)
yield
Deployer.stop()
@scenario("feature.feature", "Deleting a published volume")
def test_deleting_a_published_volume():
"""Deleting a published volume."""
@scenario("feature.feature", "publishing a volume")
def test_publishing_a_volume():
"""publishing a volume."""
@scenario("feature.feature", "re-publishing a volume")
def test_republishing_a_volume():
"""re-publishing a volume."""
@scenario("feature.feature", "unpublishing a volume")
def test_unpublishing_a_volume():
"""unpublishing a volume."""
@given("a volume that has been published and unpublished")
def a_volume_that_has_been_published_and_unpublished(volume_ctx):
"""a volume that has been published and unpublished."""
# This volume will become the old entry after re-publishing the volume
volume_ctx[VOLUME_CTX_KEY_OLD] = publish_volume()
unpublish_volume()
@given("a volume that is not published")
def a_volume_that_is_not_published():
"""a volume that is not published."""
volume = ApiClient.volumes_api().get_volume(VOLUME_UUID)
assert not hasattr(volume.spec, "target")
@given("a volume that is published")
def a_volume_that_is_published(volume_ctx):
"""a volume that is published."""
volume_ctx[VOLUME_CTX_KEY] = publish_volume()
@given("an existing volume")
def an_existing_volume(an_existing_volume):
"""an existing volume."""
@when("the volume is deleted")
def the_volume_is_deleted():
"""the volume is deleted."""
ApiClient.volumes_api().del_volume(VOLUME_UUID)
@when("the volume is published")
def the_volume_is_published(volume_ctx):
"""the volume is published."""
volume_ctx[VOLUME_CTX_KEY] = publish_volume()
@when("the volume is re-published")
def the_volume_is_republished(volume_ctx):
"""the volume is re-published."""
volume_ctx[VOLUME_CTX_KEY] = publish_volume()
@when("the volume is unpublished")
def the_volume_is_unpublished():
"""the volume is unpublished."""
unpublish_volume()
@then("the nexus info structure should be present in the persistent store")
def the_nexus_info_structure_should_be_present_in_the_persistent_store(volume_ctx):
"""the nexus info structure should be present in the persistent store."""
volume = volume_ctx[VOLUME_CTX_KEY]
nexus_uuid = volume.state.target["uuid"]
assert ETCD_CLIENT.get_nexus_info(VOLUME_UUID, nexus_uuid) is not None
@then("the nexus info structure should not be present in the persistent store")
def the_nexus_info_structure_should_not_be_present_in_the_persistent_store(volume_ctx):
"""the nexus info structure should not be present in the persistent store."""
volume = volume_ctx[VOLUME_CTX_KEY]
nexus_uuid = volume.state.target["uuid"]
assert ETCD_CLIENT.get_nexus_info(VOLUME_UUID, nexus_uuid) is None
@then("the old nexus info structure should not be present in the persistent store")
def the_old_nexus_info_structure_should_not_be_present_in_the_persistent_store(
volume_ctx,
):
"""the old nexus info structure should not be present in the persistent store."""
old_volume = volume_ctx[VOLUME_CTX_KEY_OLD]
nexus_uuid = old_volume.state.target["uuid"]
assert ETCD_CLIENT.get_nexus_info(VOLUME_UUID, nexus_uuid) is None
@pytest.fixture
def an_existing_volume():
volume = ApiClient.volumes_api().put_volume(
VOLUME_UUID, CreateVolumeBody(VolumePolicy(False), 1, VOLUME_SIZE)
)
yield
try:
ApiClient.volumes_api().del_volume(volume.spec.uuid)
except NotFoundException:
# If the volume is not found it was already deleted, so carry on.
pass
# Publish the volume
def publish_volume():
volume = ApiClient.volumes_api().put_volume_target(
VOLUME_UUID, NODE_NAME, Protocol("nvmf")
)
assert hasattr(volume.state, "target")
return volume
# Unpublish the volume
def unpublish_volume():
volume = ApiClient.volumes_api().del_volume_target(VOLUME_UUID)
assert not hasattr(volume.spec, "target")
| 29.934911 | 87 | 0.749951 |
1ee91f0e50d1aa820f8c45da5b0bff6499f63b25
| 12,937 |
py
|
Python
|
software/build_tools/ramfs.py
|
mike42/65816-computer
|
9250b6dedc4757e8548d8dd6defce34ca6e03a8f
|
[
"CC-BY-4.0"
] | null | null | null |
software/build_tools/ramfs.py
|
mike42/65816-computer
|
9250b6dedc4757e8548d8dd6defce34ca6e03a8f
|
[
"CC-BY-4.0"
] | null | null | null |
software/build_tools/ramfs.py
|
mike42/65816-computer
|
9250b6dedc4757e8548d8dd6defce34ca6e03a8f
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
"""
ramfs.py: Create ramfs images for 65816 computer.
"""
import argparse
import os.path
import sys
from dataclasses import dataclass
from struct import unpack_from, pack, unpack
from typing import Optional
@dataclass
class Inode:
SIZE_BYTES = 128
TYPE_FREE = 0
TYPE_FILE = 1
TYPE_DIR = 2
MAX_CHILDREN = 43
inode_id: int
parent_inode_id: int
type_mode: int
user_id: int
group_id: int
size_bytes: int
inode_name: bytes
children: list[int]
def is_free(self):
return self._get_type() == Inode.TYPE_FREE
def is_file(self):
return self._get_type() == Inode.TYPE_FILE
def is_directory(self):
return self._get_type() == Inode.TYPE_DIR
def _get_type(self):
return (self.type_mode & 0xF000) >> 12
def add_child_inode_id(self, new_inode_id: int):
for i in range(0, len(self.children)):
if self.children[i] == 0:
self.children[i] = new_inode_id
return
raise Exception("Problems")
def to_bytes(self) -> bytearray:
name = self.inode_name.ljust(32, b"\x00")
data = pack("<HHHHH32s",
self.parent_inode_id,
self.type_mode,
self.user_id,
self.group_id,
self.size_bytes,
name) + \
pack("<43H", *self.children)
assert len(data) == Inode.SIZE_BYTES
return bytearray(data)
@staticmethod
def from_bytes(inode_id: int, data: bytearray):
assert len(data) == Inode.SIZE_BYTES
parent_inode_id, type_mode, user_id, group_id, size_bytes, inode_name = unpack_from("<HHHHH32s", data)
children = unpack_from("<43H", data, 42)
inode_name = inode_name.rstrip(b"\x00")
return Inode(inode_id, parent_inode_id, type_mode, user_id, group_id, size_bytes, inode_name,
list(children))
@staticmethod
def for_directory(inode_id: int, parent_inode_id: int, name: bytes, user_id: int = 0, group_id: int = 0,
mode=0o0755):
children = [0] * Inode.MAX_CHILDREN
inode_name = name
type_mode = (Inode.TYPE_DIR << 12) + (mode & 0x0FFF)
return Inode(inode_id=inode_id,
parent_inode_id=parent_inode_id,
type_mode=type_mode,
user_id=user_id,
group_id=group_id,
size_bytes=0,
inode_name=inode_name,
children=children)
@staticmethod
def for_file(inode_id: int, parent_inode_id: int, name: bytes, size: int=0, sectors: list[int] = [], user_id: int = 0, group_id: int = 0,
mode=0o0755):
children = sectors + [0] * (Inode.MAX_CHILDREN - len(sectors))
inode_name = name
        type_mode = (Inode.TYPE_FILE << 12) + (mode & 0x0FFF)
return Inode(inode_id=inode_id,
parent_inode_id=parent_inode_id,
type_mode=type_mode,
user_id=user_id,
group_id=group_id,
size_bytes=size,
inode_name=inode_name,
children=children)
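# Layout note (illustrative helper, not used elsewhere): the header packs five
# uint16 fields (10 bytes) plus a 32-byte name, and the 43 uint16 child slots add
# 86 bytes, which is exactly the 128-byte record that SIZE_BYTES asserts.
def _inode_roundtrip_example() -> bool:
    # Serialise a directory inode and read it back from its 128-byte record.
    original = Inode.for_directory(inode_id=1, parent_inode_id=0, name=b"docs")
    restored = Inode.from_bytes(1, original.to_bytes())
    return restored.is_directory() and restored.inode_name == b"docs"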
class PathWrapper:
components: list[bytes]
def __init__(self, path: str):
path_components = path.encode('ascii').strip(b'/').split(b'/')
tmp_components = []
for item in path_components:
if len(item) == 0:
continue
elif len(item) > 32:
raise Exception(f"Path component length cannot exceed 32: '{item}'")
elif item == ".":
continue
elif item == "..":
if len(tmp_components) > 0:
tmp_components.pop()
else:
raise Exception(f"Path is outside root directory: '{item}'")
else:
tmp_components.append(item)
self.components = tmp_components
class RamFs:
"""
Wrapper for byte array of FS data
"""
ROOT_INODE = 0
SECTOR_SIZE_BYTES = 256
SECTOR_COUNT = 2048
BITMAP_SECTOR_ID = 255
FIRST_CONTENT_SECTOR = 256
data: bytearray
def __init__(self, filename=None):
if filename:
raise Exception("Not implemented")
else:
self.data = bytearray(RamFs.SECTOR_SIZE_BYTES * RamFs.SECTOR_COUNT)
root_inode_id = self.get_next_free_inode_id()
assert root_inode_id == 0
root_dir = Inode.for_directory(inode_id=RamFs.ROOT_INODE, parent_inode_id=RamFs.ROOT_INODE, name=b"")
self._write_inode(root_dir)
def get_next_free_inode_id(self) -> int:
"""
Allocate an inode, returns number of allocated inode
"""
# Number of inodes which will fit before the bitmap
fs_inode_count = (RamFs.BITMAP_SECTOR_ID * RamFs.SECTOR_SIZE_BYTES) // Inode.SIZE_BYTES
for inode_id in range(0, fs_inode_count):
inode = self._read_inode(inode_id)
if inode.is_free():
return inode_id
return -1
def _read_inode(self, inode_id: int) -> Inode:
inode_start_byte = inode_id * Inode.SIZE_BYTES
inode_bytes = self.data[inode_start_byte:inode_start_byte + Inode.SIZE_BYTES]
return Inode.from_bytes(inode_id, inode_bytes)
def _get_child_by_name(self, inode: Inode, name: bytes) -> Optional[Inode]:
if not inode.is_directory():
raise Exception(f"Path {name} is not a directory")
for child_inode_id in inode.children:
if child_inode_id != 0:
child_inode = self._read_inode(child_inode_id)
if child_inode.inode_name == name:
return child_inode
return None
def _write_inode(self, inode: Inode):
inode_start_byte = inode.inode_id * Inode.SIZE_BYTES
self.data[inode_start_byte:inode_start_byte + Inode.SIZE_BYTES] = inode.to_bytes()
def stat(self, filename) -> Inode:
path = PathWrapper(filename)
return self._read_inode_by_name(path)
def mkdir(self, filename):
path = PathWrapper(filename)
parent_inode = self._read_inode_by_name(path, parent=True)
new_directory = Inode.for_directory(self.get_next_free_inode_id(),
parent_inode_id=parent_inode.inode_id,
name=path.components.pop(0))
self._write_inode(new_directory)
parent_inode.add_child_inode_id(new_directory.inode_id)
self._write_inode(parent_inode)
def _read_inode_by_name(self, path: PathWrapper, parent=False) -> Inode:
# Start at the root
current_inode = self._read_inode(RamFs.ROOT_INODE)
# Navigate down to the requested inode
while len(path.components) > 0:
# Parent directory was requested, return it
if len(path.components) == 1 and parent:
return current_inode
next_path = path.components.pop(0)
next_inode = self._get_child_by_name(current_inode, next_path)
if next_inode is None:
raise Exception(f"Path {next_path} does not exist")
current_inode = next_inode
return current_inode
def ls(self, filename="") -> list[Inode]:
path = PathWrapper(filename)
directory = self._read_inode_by_name(path)
result = []
for child_inode_id in directory.children:
if child_inode_id != 0:
child_inode = self._read_inode(child_inode_id)
result.append(child_inode)
return result
def save_to_disk(self, filename: str):
""" Write entire FS to disk """
with open(filename, 'wb') as out:
out.write(self.data)
def add_file(self, filename, content: bytes):
path = PathWrapper(filename)
parent_inode = self._read_inode_by_name(path, parent=True)
file_size = len(content)
sector_ids = []
for i in range(0, len(content), RamFs.SECTOR_SIZE_BYTES):
sector_id = self._alloc_sector_id()
sector_ids.append(sector_id)
sector_data = bytearray(content[i:i+RamFs.SECTOR_SIZE_BYTES].ljust(RamFs.SECTOR_SIZE_BYTES, b"\x00"))
self._write_sector(sector_id, sector_data)
new_file = Inode.for_file(self.get_next_free_inode_id(),
parent_inode_id=parent_inode.inode_id,
name=path.components.pop(0),
size=file_size,
sectors=sector_ids)
self._write_inode(new_file)
parent_inode.add_child_inode_id(new_file.inode_id)
self._write_inode(parent_inode)
""" Does absolutely nothing """
pass
def _alloc_sector_id(self) -> int:
"""
Allocate a sector, returns sector id
"""
bitmap_start = RamFs.BITMAP_SECTOR_ID * RamFs.SECTOR_SIZE_BYTES
bitmap_len = (RamFs.SECTOR_COUNT - RamFs.FIRST_CONTENT_SECTOR) // 16 # Read 16 bits (one word) at a time
# Start at first content sector - skips 16 16-bit words
sector_idx = RamFs.FIRST_CONTENT_SECTOR
for bitmap_idx in range(0, bitmap_len):
this_entry_offset = bitmap_start + bitmap_idx * 2
this_entry, = unpack_from("<H", self.data, this_entry_offset) # Note tuple unpack
if this_entry == 65535:
# Short-circuit if all 16 sectors in this entry are allocated
sector_idx = sector_idx + 1
continue
# One of these 16 bits indicates an unallocated sector, find it..
test_val = 1
for i in range(0, 16):
masked_val = this_entry & test_val
if masked_val == 0:
this_entry = this_entry | test_val
self.data[this_entry_offset:this_entry_offset + 2] = pack("<H", this_entry)
return sector_idx
test_val = test_val << 1
sector_idx = sector_idx + 1
raise Exception("No unallocated sectors remaining")
def _write_sector(self, sector_id: int, sector_bytes: bytearray):
assert len(sector_bytes) == RamFs.SECTOR_SIZE_BYTES
sector_start_byte = sector_id * RamFs.SECTOR_SIZE_BYTES
self.data[sector_start_byte:sector_start_byte + RamFs.SECTOR_SIZE_BYTES] = sector_bytes
def _read_sector(self, sector_id: int) -> bytearray:
assert (sector_id >= RamFs.FIRST_CONTENT_SECTOR) and (sector_id < RamFs.SECTOR_COUNT)
sector_start_byte = sector_id * RamFs.SECTOR_SIZE_BYTES
return self.data[sector_start_byte:sector_start_byte + RamFs.SECTOR_SIZE_BYTES]
def get_file(self, filename) -> bytes:
""" Retrieve a file from the filesystem"""
path = PathWrapper(filename)
inode = self._read_inode_by_name(path)
result = b""
remaining_bytes = inode.size_bytes
for sector_id in inode.children:
if sector_id == 0:
continue
full_sector_content = self._read_sector(sector_id)
if remaining_bytes < RamFs.SECTOR_SIZE_BYTES:
result = result + full_sector_content[0:remaining_bytes]
remaining_bytes = 0
else:
result = result + full_sector_content
remaining_bytes = remaining_bytes - RamFs.SECTOR_SIZE_BYTES
assert len(result) == inode.size_bytes
return result
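# Usage sketch (illustrative): building and reading an image entirely in memory,
# without going through the command-line entry point below.
def _ramfs_example() -> bytes:
    fs = RamFs()
    fs.mkdir("docs")
    fs.add_file("docs/readme.txt", b"hello, 65816")
    # ls() returns the child inodes of a directory; get_file() walks the file's
    # allocated sectors and trims the last sector to the recorded size.
    assert [child.inode_name for child in fs.ls("docs")] == [b"readme.txt"]
    return fs.get_file("docs/readme.txt")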
def create(filename: str, files: list[str]):
""" Create new archive """
fs = RamFs()
# Add files something like this
for file in files:
_add_recursive(fs, file)
fs.save_to_disk(filename)
def _add_recursive(fs: RamFs, file: str):
if os.path.isdir(file):
fs.mkdir(file)
for subfile in os.listdir(file):
_add_recursive(fs, os.path.join(file, subfile))
else:
fs.add_file(file, bytearray(open(file, 'rb').read()))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Create ramfs images for 65816 computer.')
action_group = parser.add_mutually_exclusive_group(required=True)
action_group.add_argument('--create', help='create image', action='store_true')
action_group.add_argument('--extract', help='extract image', action='store_true')
parser.add_argument('--file', metavar='output.bin', type=str, required=True, help='image file to work on')
parser.add_argument('fs_file', metavar='FILE', type=str, nargs='*', help='files to add')
args = parser.parse_args(sys.argv[1:])
if args.create:
create(args.file, args.fs_file)
if args.extract:
raise Exception("Not implemented")
| 38.61791 | 141 | 0.606168 |
6f40b78353ff5393194cfabced9ef9c0cf98a891
| 7,974 |
py
|
Python
|
radian/lineedit/prompt.py
|
randy3k/ride
|
8a052daebaa8f03a9fff95eb38d45a32ac43bed9
|
[
"MIT"
] | 1,051 |
2018-12-20T19:35:43.000Z
|
2022-03-31T19:44:35.000Z
|
radian/lineedit/prompt.py
|
randy3k/ride
|
8a052daebaa8f03a9fff95eb38d45a32ac43bed9
|
[
"MIT"
] | 255 |
2018-12-19T13:51:01.000Z
|
2022-03-31T09:33:43.000Z
|
radian/lineedit/prompt.py
|
randy3k/ride
|
8a052daebaa8f03a9fff95eb38d45a32ac43bed9
|
[
"MIT"
] | 54 |
2019-03-13T02:25:31.000Z
|
2022-03-15T16:21:50.000Z
|
from .buffer import ModalBuffer
from prompt_toolkit import PromptSession
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.auto_suggest import DynamicAutoSuggest
from prompt_toolkit.completion import DynamicCompleter, ThreadedCompleter
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import Condition, emacs_mode
from prompt_toolkit.key_binding.key_bindings import \
KeyBindings, DynamicKeyBindings, merge_key_bindings
from prompt_toolkit.validation import DynamicValidator
from prompt_toolkit.shortcuts.prompt import is_true, CompleteStyle
from prompt_toolkit.utils import to_str
from collections import OrderedDict
from typing import cast
# TODO: allow lines from different modes when replying history
class ModeSpec():
def __init__(
self,
name,
on_activated=None,
on_dectivated=None,
keep_history=True,
history_book=None,
prompt_key_bindings=None,
**kwargs):
self.name = name
self.on_activated = on_activated
self.on_dectivated = on_dectivated
self.keep_history = keep_history
if history_book:
self.history_book = history_book
else:
self.history_book = name
self.prompt_key_bindings = prompt_key_bindings
for key in kwargs:
if key not in PromptSession._fields:
raise KeyError("unknown field", key)
setattr(self, key, kwargs[key])
class ModalPromptSession(PromptSession):
_spec_class = ModeSpec
_current_mode = None
_default_settings = {}
_specs = OrderedDict()
# new settings
add_history = True
search_no_duplicates = False
def _check_args(self, kwargs):
if "specs" in kwargs:
specs = kwargs["specs"]
for m in specs.values():
assert isinstance(m, ModeSpec)
def _filter_args(self, kwargs):
for key in ["add_history", "search_no_duplicates"]:
if key in kwargs:
setattr(self, key, kwargs[key])
del kwargs[key]
def __init__(self, *args, **kwargs):
self._check_args(kwargs)
self._filter_args(kwargs)
super().__init__(*args, **kwargs)
self._backup_settings()
# for backward compatibility
@property
def modes(self):
return self._specs
@property
def specs(self):
return self._specs
@property
def current_mode(self):
return self._current_mode
@property
def current_mode_spec(self):
return self.specs[self.current_mode]
def register_mode(self, name, **kwargs):
spec = self._spec_class(name, **kwargs)
self.specs[spec.name] = spec
if len(self.specs) == 1:
self.activate_mode(spec.name)
else:
self.activate_mode(self.current_mode, force=True)
def unregister_mode(self, spec_or_name):
if isinstance(spec_or_name, str):
del self.specs[spec_or_name]
else:
del self.specs[next(iter(k for k, v in self.specs.items() if v == spec_or_name))]
def activate_mode(self, name, force=False):
if name not in self.specs:
raise Exception("no such mode")
spec = self.specs[name]
if self.current_mode == spec.name and not force:
return
if self.current_mode:
current_spec = self.specs[self.current_mode]
if current_spec.on_dectivated:
current_spec.on_dectivated(self)
self._current_mode = spec.name
self._restore_settings()
for name in self._fields:
if name != "key_bindings":
if hasattr(spec, name):
setattr(self, name, getattr(spec, name))
self.key_bindings = merge_key_bindings(
[DynamicKeyBindings(lambda: self.specs[self.current_mode].prompt_key_bindings)] +
[
m.key_bindings for m in self.specs.values()
if hasattr(m, "key_bindings") and m.key_bindings
]
)
if spec.on_activated:
spec.on_activated(self)
def _backup_settings(self):
for name in self._fields:
self._default_settings[name] = getattr(self, name)
def _restore_settings(self):
for name in self._fields:
setattr(self, name, self._default_settings[name])
def _create_default_buffer(self):
"""
radian modifications
supports both complete_while_typing and enable_history_search
Create and return the default input buffer.
"""
dyncond = self._dyncond
# Create buffers list.
def accept(buff) -> bool:
"""Accept the content of the default buffer. This is called when
the validation succeeds."""
cast(Application[str], get_app()).exit(result=buff.document.text)
return True # Keep text, we call 'reset' later on.
return ModalBuffer(
name=DEFAULT_BUFFER,
# Make sure that complete_while_typing is disabled when
# enable_history_search is enabled. (First convert to Filter,
# to avoid doing bitwise operations on bool objects.)
complete_while_typing=Condition(
lambda: is_true(self.complete_while_typing)
# and not is_true(self.enable_history_search)
and not self.complete_style == CompleteStyle.READLINE_LIKE
),
validate_while_typing=dyncond("validate_while_typing"),
enable_history_search=dyncond("enable_history_search"),
validator=DynamicValidator(lambda: self.validator),
completer=DynamicCompleter(
lambda: ThreadedCompleter(self.completer)
if self.complete_in_thread and self.completer
else self.completer
),
history=self.history,
auto_suggest=DynamicAutoSuggest(lambda: self.auto_suggest),
accept_handler=accept,
tempfile_suffix=lambda: to_str(self.tempfile_suffix or ""),
tempfile=lambda: to_str(self.tempfile or ""),
session=self,
search_no_duplicates=self.search_no_duplicates
)
def _create_application(self, *args, **kwargs):
app = super()._create_application(*args, **kwargs)
kb = KeyBindings()
# operate-and-get-next
@kb.add('c-o', filter=emacs_mode)
def _(event):
buff = event.current_buffer
working_index = buff.working_index
buff.validate_and_handle()
def set_working_index() -> None:
buff.go_to_next_history(working_index)
event.app.pre_run_callables.append(set_working_index)
app._default_bindings = merge_key_bindings([app._default_bindings, kb])
return app
def prompt(self, *args, **kwargs):
self._check_args(kwargs)
self._filter_args(kwargs)
if args:
raise Exception("positional arguments are deprecated")
backup = self._default_settings.copy()
for name in self._fields:
if name in kwargs:
value = kwargs[name]
if value is not None:
                    self._default_settings[name] = value
orig_mode = self.current_mode
try:
result = super().prompt(**kwargs)
except KeyboardInterrupt:
self._default_settings = backup.copy()
self.activate_mode(orig_mode, force=True)
            raise
finally:
self._default_settings = backup.copy()
# prompt will restore settings, we need to reactivate current mode
self.activate_mode(self.current_mode, force=True)
return result
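# Usage sketch (illustrative): the mode names, prompt strings and options below
# are assumptions for the example, not radian's actual configuration.
def _modal_session_example() -> str:
    session = ModalPromptSession(message="> ")
    session.register_mode("main", message="> ", keep_history=True)
    session.register_mode("alt", message="alt> ", keep_history=False)
    # activate_mode() restores the backed-up defaults, applies the fields stored
    # in the target ModeSpec and rebuilds the merged key bindings.
    session.activate_mode("alt")
    return session.prompt()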
| 33.64557 | 93 | 0.628668 |
271e21e6522fa655063b850d8ba69c633fbf8498
| 2,186 |
py
|
Python
|
web/controller/nodata.py
|
Liuyanglong/monitor-portal
|
ce020f636e6179b3eb685660bd7e505ed41f2f9b
|
[
"Apache-2.0"
] | null | null | null |
web/controller/nodata.py
|
Liuyanglong/monitor-portal
|
ce020f636e6179b3eb685660bd7e505ed41f2f9b
|
[
"Apache-2.0"
] | null | null | null |
web/controller/nodata.py
|
Liuyanglong/monitor-portal
|
ce020f636e6179b3eb685660bd7e505ed41f2f9b
|
[
"Apache-2.0"
] | 1 |
2022-02-21T09:46:36.000Z
|
2022-02-21T09:46:36.000Z
|
# -*- coding:utf-8 -*-
__author__ = 'niean'
from web import app
from flask import request, g, render_template, jsonify
from web.model.nodata import Nodata
from frame.params import required_chk
from frame.config import UIC_ADDRESS
@app.route('/nodatas')
def nodatas_get():
g.menu = 'nodatas'
page = int(request.args.get('p', 1))
limit = int(request.args.get('limit', 5))
query = request.args.get('q', '').strip()
mine = request.args.get('mine', '1')
me = g.user_name if mine == '1' else None
vs, total = Nodata.query(page, limit, query, me)
return render_template(
'nodata/list.html',
data={
'vs': vs,
'total': total,
'query': query,
'limit': limit,
'page': page,
'mine': mine,
}
)
@app.route('/nodata/add')
def nodata_add_get():
g.menu = 'nodatas'
o = Nodata.get(int(request.args.get('id', '0').strip()))
return render_template('nodata/add.html',
data={'nodata': o, 'uic_address': UIC_ADDRESS['external']})
@app.route('/nodata/update', methods=['POST'])
def nodata_update_post():
nodata_id = request.form['nodata_id'].strip()
name = request.form['name'].strip()
obj = request.form['obj'].strip()
obj_type = request.form['obj_type'].strip()
metric = request.form['metric'].strip()
tags = request.form['tags'].strip()
dstype = request.form['dstype'].strip()
step = request.form['step'].strip()
mock = request.form['mock'].strip()
msg = required_chk({
'name' : name,
'endpoint' : obj,
'endpoint_type' : obj_type,
'metric' : metric,
'type' : dstype,
'step' : step,
'mock_value': mock,
})
if msg:
return jsonify(msg=msg)
return jsonify(msg=Nodata.save_or_update(
nodata_id,
name,
obj,
obj_type,
metric,
tags,
dstype,
step,
mock,
g.user_name,
))
@app.route('/nodata/delete/<nodata_id>')
def nodata_delete_get(nodata_id):
nodata_id = int(nodata_id)
Nodata.delete_one(nodata_id)
return jsonify(msg='')
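# Client sketch (illustrative): posting a nodata config to this controller with
# the requests library; the host, port and field values below are assumptions,
# and the endpoint still expects a logged-in user (g.user_name).
def _nodata_update_example():
    import requests
    payload = {
        "nodata_id": "0",  # assumed convention for "create new entry"
        "name": "cpu.idle-nodata",
        "obj": "host01",
        "obj_type": "host",
        "metric": "cpu.idle",
        "tags": "",
        "dstype": "GAUGE",
        "step": "60",
        "mock": "-1",
    }
    return requests.post("http://127.0.0.1:8080/nodata/update", data=payload).json()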
| 26.658537 | 86 | 0.574565 |
ff91c7953c1fb73e4c389ccfb60a00ee94dde957
| 7,780 |
py
|
Python
|
bip_utils/conf/bip49_coin_conf.py
|
spletnik/bip_utils
|
d0fa0290e31a8a69fd5644ceb09e096deb515dfd
|
[
"MIT"
] | null | null | null |
bip_utils/conf/bip49_coin_conf.py
|
spletnik/bip_utils
|
d0fa0290e31a8a69fd5644ceb09e096deb515dfd
|
[
"MIT"
] | null | null | null |
bip_utils/conf/bip49_coin_conf.py
|
spletnik/bip_utils
|
d0fa0290e31a8a69fd5644ceb09e096deb515dfd
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Imports
from bip_utils.conf.bip_coin_conf_enum import AddrTypes, Bip32Types
from bip_utils.conf.bip_coin_conf_common import *
from bip_utils.conf.bip_coin_conf_helper import CoinNames, KeyNetVersions
from bip_utils.conf.bip_coin_conf import BipCoinConf, BipBitcoinCashConf, BipLitecoinConf
# Bitcoin key net version for main net (ypub / yprv)
BIP49_BTC_KEY_NET_VER_MAIN: KeyNetVersions = KeyNetVersions(b"049d7cb2", b"049d7878")
# Bitcoin key net version for test net (upub / uprv)
BIP49_BTC_KEY_NET_VER_TEST: KeyNetVersions = KeyNetVersions(b"044a5262", b"044a4e28")
# Bitcoin P2SH net version for main net
BIP49_BTC_P2SH_NET_VER_MAIN: bytes = b"\x05"
# Bitcoin P2SH net version for test net
BIP49_BTC_P2SH_NET_VER_TEST: bytes = b"\xc4"
# Configuration for Bitcoin main net
Bip49BitcoinMainNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Bitcoin", "BTC"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
wif_net_ver=BTC_WIF_NET_VER_MAIN,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": BIP49_BTC_P2SH_NET_VER_MAIN},
addr_type=AddrTypes.P2SH)
# Configuration for Bitcoin test net
Bip49BitcoinTestNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Bitcoin TestNet", "BTC"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_TEST,
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\xc4"},
addr_type=AddrTypes.P2SH)
# Configuration for Bitcoin Cash main net
Bip49BitcoinCashMainNet: BipBitcoinCashConf = BipBitcoinCashConf(
coin_name=CoinNames("Bitcoin Cash", "BCH"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
wif_net_ver=BTC_WIF_NET_VER_MAIN,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"std_net_ver": b"\x08", "std_hrp": "bitcoincash", "legacy_net_ver": BIP49_BTC_P2SH_NET_VER_MAIN},
addr_type=AddrTypes.P2SH_BCH,
addr_type_legacy=AddrTypes.P2SH)
# Configuration for Bitcoin Cash test net
Bip49BitcoinCashTestNet: BipBitcoinCashConf = BipBitcoinCashConf(
coin_name=CoinNames("Bitcoin Cash TestNet", "BCH"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_TEST,
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"std_net_ver": b"\x08", "std_hrp": "bchtest", "legacy_net_ver": BIP49_BTC_P2SH_NET_VER_TEST},
addr_type=AddrTypes.P2SH_BCH,
addr_type_legacy=AddrTypes.P2SH)
# Configuration for BitcoinSV main net
Bip49BitcoinSvMainNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("BitcoinSV", "BSV"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
wif_net_ver=BTC_WIF_NET_VER_MAIN,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": BIP49_BTC_P2SH_NET_VER_MAIN},
addr_type=AddrTypes.P2SH)
# Configuration for BitcoinSV test net
Bip49BitcoinSvTestNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("BitcoinSV TestNet", "BSV"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_TEST,
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": BIP49_BTC_P2SH_NET_VER_TEST},
addr_type=AddrTypes.P2SH)
# Configuration for Dash main net
Bip49DashMainNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Dash", "DASH"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
wif_net_ver=b"\xcc",
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\x10"},
addr_type=AddrTypes.P2SH)
# Configuration for Dash test net
Bip49DashTestNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Dash TestNet", "DASH"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_TEST,
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\x13"},
addr_type=AddrTypes.P2SH)
# Configuration for Dogecoin main net
Bip49DogecoinMainNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Dogecoin", "DOGE"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=KeyNetVersions(b"02facafd", b"02fac398"), # dgub / dgpv
wif_net_ver=b"\x9e",
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\x16"},
addr_type=AddrTypes.P2SH)
# Configuration for Dogecoin test net
Bip49DogecoinTestNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Dogecoin TestNet", "DOGE"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=KeyNetVersions(b"0432a9a8", b"0432a243"), # tgub / tgpv
wif_net_ver=b"\xf1",
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": BIP49_BTC_P2SH_NET_VER_TEST},
addr_type=AddrTypes.P2SH)
# Configuration for Litecoin main net
Bip49LitecoinMainNet: BipLitecoinConf = BipLitecoinConf(
coin_name=CoinNames("Litecoin", "LTC"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
alt_key_net_ver=KeyNetVersions(b"01b26ef6", b"01b26792"), # Mtpv / Mtub
wif_net_ver=b"\xb0",
bip32_type=Bip32Types.SECP256K1,
addr_conf={"std_net_ver": b"\x32", "depr_net_ver": BIP49_BTC_P2SH_NET_VER_MAIN},
addr_type=AddrTypes.P2SH)
# Configuration for Litecoin test net
Bip49LitecoinTestNet: BipLitecoinConf = BipLitecoinConf(
coin_name=CoinNames("Litecoin TestNet", "LTC"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=KeyNetVersions(b"0436f6e1", b"0436ef7d"), # ttub / ttpv
alt_key_net_ver=KeyNetVersions(b"0436f6e1", b"0436ef7d"), # ttub / ttpv
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"std_net_ver": b"\x3a", "depr_net_ver": BIP49_BTC_P2SH_NET_VER_TEST},
addr_type=AddrTypes.P2SH)
# Configuration for Zcash main net
Bip49ZcashMainNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Zcash", "ZEC"),
is_testnet=False,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_MAIN,
wif_net_ver=BTC_WIF_NET_VER_MAIN,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\x1c\xbd"},
addr_type=AddrTypes.P2SH)
# Configuration for Zcash test net
Bip49ZcashTestNet: BipCoinConf = BipCoinConf(
coin_name=CoinNames("Zcash TestNet", "ZEC"),
is_testnet=True,
def_path=NOT_HARDENED_DEF_PATH,
key_net_ver=BIP49_BTC_KEY_NET_VER_TEST,
wif_net_ver=BTC_WIF_NET_VER_TEST,
bip32_type=Bip32Types.SECP256K1,
addr_conf={"net_ver": b"\x1c\xba"},
addr_type=AddrTypes.P2SH)
| 41.382979 | 113 | 0.769152 |
bbb1ee1ab1087417f6d5fef069c817ebdd114162
| 302 |
py
|
Python
|
main.py
|
Trimatix-indie/SuperDeckBreaker
|
6c5f0a6593df5e7f6807b1e2b09aff65dcf8a6fc
|
[
"MIT"
] | 5 |
2020-12-04T19:52:16.000Z
|
2021-03-19T19:56:05.000Z
|
main.py
|
KroeteTroete/GOF2BountyBot-1
|
f7f276a1b0a8fa376fab3d48e0a1f3989e15c80d
|
[
"MIT"
] | 783 |
2021-01-22T20:54:48.000Z
|
2022-03-29T08:22:49.000Z
|
main.py
|
Trimatix-indie/SuperDeckBreaker
|
6c5f0a6593df5e7f6807b1e2b09aff65dcf8a6fc
|
[
"MIT"
] | 5 |
2020-12-05T13:47:08.000Z
|
2022-01-20T22:13:20.000Z
|
import sys
from bot.cfg import configurator
# Load config if one is given
if len(sys.argv) > 1:
configurator.loadCfg(sys.argv[1])
# initialize bot config
configurator.init()
# load and run bot
from bot import bot
status = bot.run()
# return exit status code for bot restarting
sys.exit(status)
| 17.764706 | 44 | 0.741722 |
024ce74ffbb9202e0e661931d0a1f9e2828f0bee
| 3,869 |
py
|
Python
|
narada/__init__.py
|
rahul-verma/narada
|
7a96c96bd46c29329dc8493cbd5aa2b68e03ce7c
|
[
"Apache-2.0"
] | null | null | null |
narada/__init__.py
|
rahul-verma/narada
|
7a96c96bd46c29329dc8493cbd5aa2b68e03ce7c
|
[
"Apache-2.0"
] | null | null | null |
narada/__init__.py
|
rahul-verma/narada
|
7a96c96bd46c29329dc8493cbd5aa2b68e03ce7c
|
[
"Apache-2.0"
] | 1 |
2020-09-05T10:00:32.000Z
|
2020-09-05T10:00:32.000Z
|
import time
import os
from flask import Flask, request, Response
from flask_restful import Api, Resource
from waitress import serve
from uuid import uuid4
MY_DIR = os.path.dirname(os.path.realpath(__file__))
RES_DIR = os.path.join(MY_DIR, "res")
items = []
ditems = dict()
class Item(Resource):
def __get_item_if_exists(self, name):
return next(iter(filter(lambda x: x['name'] == name, items)), None)
def get(self, name):
item = self.__get_item_if_exists(name)
return item, item and 200 or 404
def post(self):
rdata = request.get_json() #force=True -> now it does not need content-type header
name = rdata['name']
item = self.__get_item_if_exists(name)
if item:
return {'code' : 'error', 'message' : 'item already exists for name: ' + name}, 400 # Bad Request
item = {'name' : name, 'price' : rdata['price']}
items.append(item)
return {'code' : 'success'}, 200
def delete(self, name):
global items
items = list(filter(lambda x: x['name'] != name, items))
return {'code' : 'success'}
def put(self):
rdata = request.get_json()
name = rdata['name']
item = self.__get_item_if_exists(name)
if item:
item.update(rdata)
return {'code' : 'success'}, 200
else:
item = {'name' : name, 'price' : rdata['price']}
items.append(item)
return {'code' : 'success'}, 201
class DynamicItem(Resource):
'''
    POST generates an id for the item; GET then looks the item up by that id instead of by its name.
'''
def get(self, iid):
try:
return ditems[iid], 200
except KeyError:
return None, 404
def post(self):
rdata = request.get_json() #force=True -> now it does not need content-type header
iid = str(uuid4())
rdata["iid"] = iid
ditems[iid] = rdata
return ditems[iid], 200
class ItemList(Resource):
def get(self):
return {'items' : items}
def delete(self):
global items
items = list()
return {'code' : 'success'}
class DynamicItemList(Resource):
def get(self):
return {'ditems' : ditems}
def delete(self):
global ditems
ditems = dict()
return {'code' : 'success'}
class Incrementer(Resource):
def get(self, value):
return {'value' : value + 1}, 200
class NaradaSvc(Resource):
def get(self, path):
f = open(os.path.join(RES_DIR, path), "r")
res = f.read().replace("${BODY}", "Hello there")
return Response(res, mimetype="text/html")
def __launch_setu_svc(port):
app = Flask(__name__)
api = Api(app)
api.add_resource(NaradaSvc, '/narada', '/narada/<path:path>')
api.add_resource(Item, '/item', '/item/<string:name>', endpoint='item')
api.add_resource(ItemList, '/items', endpoint='items')
api.add_resource(Incrementer, '/inc', '/inc/<int:value>', endpoint='inc')
api.add_resource(DynamicItem, '/ditem', '/ditem/<string:iid>', endpoint='ditem')
api.add_resource(DynamicItemList, '/ditems', endpoint='ditems')
# api.add_resource(ItemList, '/items', endpoint='items')
#app.run(port=port, use_evalex=False) #, debug=True)
serve(app, host="localhost", port=port, _quiet=True)
def wait_for_port(port):
import socket
server_address = ('localhost', port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ct = time.time()
while(time.time() - ct < 60):
try:
sock.bind(server_address)
sock.close()
return
except Exception as e:
time.sleep(1)
print("Port is not open. Timeout after 60 seconds.")
raise RuntimeError("Another service is running at port {}. Narada could not be launched. Message: ".format(port))
def launch_service(port):
try:
wait_for_port(port)
__launch_setu_svc(port)
except Exception as e:
raise RuntimeError("Not able to launch Narada Service. Got response: ", e)
| 28.448529 | 117 | 0.640993 |
154b141f4e0130b235747baa87315c2715f21d9a
| 1,410 |
py
|
Python
|
demo/utils.py
|
JZZ-NOTE/ERNIE
|
ff89c2a6baef9fd662a5b847224ca39e67d6ad11
|
[
"Apache-2.0"
] | 3,712 |
2019-07-19T08:40:57.000Z
|
2022-03-31T08:52:43.000Z
|
demo/utils.py
|
JZZ-NOTE/ERNIE
|
ff89c2a6baef9fd662a5b847224ca39e67d6ad11
|
[
"Apache-2.0"
] | 541 |
2019-07-21T08:57:11.000Z
|
2022-03-22T12:13:28.000Z
|
demo/utils.py
|
JZZ-NOTE/ERNIE
|
ff89c2a6baef9fd662a5b847224ca39e67d6ad11
|
[
"Apache-2.0"
] | 857 |
2019-07-19T08:40:52.000Z
|
2022-03-30T03:23:27.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import logging
import paddle
class UnpackDataLoader(paddle.io.DataLoader):
def __init__(self, *args, **kwargs):
super(UnpackDataLoader, self).__init__(*args, batch_size=1, **kwargs)
def __iter__(self):
return ([yy[0] for yy in y]
for y in super(UnpackDataLoader, self).__iter__())
def create_if_not_exists(dir):
try:
dir.mkdir(parents=True)
except FileExistsError:
pass
return dir
def get_warmup_and_linear_decay(max_steps, warmup_steps):
return lambda step: min(step / warmup_steps, 1. - (step - warmup_steps) / (max_steps - warmup_steps))
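# A small worked example of the schedule above, kept inside a helper so that
# importing this module stays side-effect free (illustrative only). With
# max_steps=1000 and warmup_steps=100 the multiplier ramps linearly from 0 to 1
# over the first 100 steps, then decays linearly back to 0 at step 1000.
def _demo_warmup_and_linear_decay():
    schedule = get_warmup_and_linear_decay(max_steps=1000, warmup_steps=100)
    assert abs(schedule(50) - 0.5) < 1e-9    # halfway through warmup
    assert abs(schedule(100) - 1.0) < 1e-9   # warmup finished
    assert abs(schedule(550) - 0.5) < 1e-9   # halfway through the linear decay
    assert abs(schedule(1000) - 0.0) < 1e-9  # fully decayed
    return [schedule(step) for step in (0, 50, 100, 550, 1000)]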
| 31.333333 | 105 | 0.739007 |
e787cfd832538ba181c4390750f200e5944a5cc7
| 4,141 |
py
|
Python
|
escalate/rest_api/tests/model_tests/workflow/outcome.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 11 |
2020-09-29T13:59:02.000Z
|
2022-03-23T04:57:52.000Z
|
escalate/rest_api/tests/model_tests/workflow/outcome.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 95 |
2019-11-18T20:10:49.000Z
|
2022-03-31T17:09:49.000Z
|
escalate/rest_api/tests/model_tests/workflow/outcome.py
|
darkreactions/ESCALATE
|
0020da00b81a2dd80d1c9fd72d2edf92b519e605
|
[
"MIT"
] | 2 |
2021-11-26T18:22:08.000Z
|
2022-03-31T11:57:10.000Z
|
from ..model_tests_utils import (
status_codes,
DELETE,
PUT,
POST,
GET,
ERROR,
random_model_dict,
check_status_code,
compare_data
)
from core.models import (
OutcomeTemplate,
ExperimentTemplate,
)
outcome_test_data = {}
outcome_tests = [
    ##----TEST 0----##
    # creates two experiment templates
    # creates an outcome template with the first experiment as a foreign key
    # gets the outcome
    # puts the outcome with new random data for the same experiment
    # gets the updated outcome
    # deletes the updated outcome
    # gets the outcome (should return error)
[
*[{
'name': name,
'method': POST,
'endpoint': 'experimenttemplate-list',
'body': random_model_dict(ExperimentTemplate),
'args': [],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': POST
}
}
} for name in ['experiment0', 'experiment1']],
{
'name': 'outcome0',
'method': POST,
'endpoint': 'outcometemplate-list',
'body': (request_body := random_model_dict(OutcomeTemplate, experiment='experiment0__url')),
'args': [],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': POST,
'request_body': request_body
}
}
},
{
'name': 'outcome0_get_0',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'outcome0_update_0',
'method': PUT,
'endpoint': 'outcometemplate-detail',
'body': (request_body := random_model_dict(OutcomeTemplate, experiment='experiment0__url')),
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': compare_data,
'args': [],
'kwargs': {
'status_code': PUT,
'request_body': request_body
}
}
},
{
'name': 'outcome0_get_1',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': GET
}
}
},
{
'name': 'outcome0_delete_0',
'method': DELETE,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': DELETE
}
}
},
{
'name': 'outcome0_get_2',
'method': GET,
'endpoint': 'outcometemplate-detail',
'body': {},
'args': [
'outcome0__uuid'
],
'query_params': [],
'is_valid_response': {
'function': check_status_code,
'args': [],
'kwargs': {
'status_code': ERROR
}
}
},
],
]
| 27.791946 | 105 | 0.416808 |
c0100b2ab299aa11e70c5121481281f676b87f85
| 3,753 |
py
|
Python
|
kps2d_detection/test.py
|
abrichr/cvToolkit
|
7f559138c27fedf9e3e3929cd4d6e4f8198d4c51
|
[
"MIT"
] | null | null | null |
kps2d_detection/test.py
|
abrichr/cvToolkit
|
7f559138c27fedf9e3e3929cd4d6e4f8198d4c51
|
[
"MIT"
] | null | null | null |
kps2d_detection/test.py
|
abrichr/cvToolkit
|
7f559138c27fedf9e3e3929cd4d6e4f8198d4c51
|
[
"MIT"
] | null | null | null |
import os
import cv2
from tqdm import tqdm
from pose_utils import draw_img17, resize_img, draw_img18, convert18
from argparse import ArgumentParser
import numpy as np
MAXTORSO = 0
def action_recgnition(kps, thred=0.1):
global MAXTORSO
average_score = kps[:,2].sum() / 17
assert average_score > thred, 'average score is too low'
hip = np.zeros((2,3))
hipLeft, hipRight = kps[8], kps[11]
hip[0], hip[1] = kps[8], kps[11]
mask = hip[:, 2] > thred
assert mask.sum()>0, 'hip score too low'
hipHight = (hip[:, 1] * mask).sum() / mask.sum()
throat = kps[1]
    assert throat[2]>thred, 'throat score is too low'
hipCenter = np.sum(hip, axis=0)/2
torsoLength = np.linalg.norm(throat[:2]-hipCenter[:2])
if MAXTORSO<torsoLength:
MAXTORSO=torsoLength
print('torso length is ', MAXTORSO)
head = np.zeros((5, 3))
head[0] = kps[0] # nose
head[1:] = kps[14:18] # leftEye rightEye leftEar rightEar
mask = head[:,2] > thred
headHeight = (head[:, 1] * mask).sum() / mask.sum()
foot = np.zeros((2, 3))
foot[0], foot[1] = kps[10], kps[13]
mask = foot[:, 2] > thred
assert mask.sum()>0, 'foot score is too low'
footHeight = (foot[:, 1] * mask).sum() / mask.sum()
handLeft, handRight = kps[4], kps[7]
# assert handLeft[2]>thred and handRight[2]>thred, 'two hand score are too low'
knee = np.zeros((2,3))
kneeRight, kneeLeft = kps[9], kps[12]
knee[0], knee[1] = kneeLeft, kneeRight
    assert kneeRight[2]>thred and kneeLeft[2]>thred, 'two knee scores are too low'
mask = knee[:, 2] > thred
kneeHeight = (knee[:, 1] * mask).sum() / mask.sum()
# fall
torsoLength=MAXTORSO
print('FALL: {}, {}'.format((footHeight-headHeight), torsoLength) )
print('squat', abs(hipHight-kneeHeight), torsoLength/5*2, footHeight-hipHight, torsoLength)
if footHeight-headHeight < torsoLength:
print('fall')
return 'fall'
elif headHeight-handLeft[1] > 0 and headHeight-handRight[1] > 0:
print('hand up and help!')
return 'hand up and help'
elif abs(hipHight-kneeHeight) < torsoLength/5*2 and footHeight-hipHight>torsoLength/2:
print('squat')
return 'squat'
else:
print('Other')
return 'other'
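# Minimal synthetic check of the heuristic above (illustration only, not called
# anywhere). The keypoints follow the 18-point OpenPose layout assumed by
# convert18/draw_img18 (0 nose, 1 neck, 8/11 hips, 9/12 knees, 10/13 feet,
# 14-17 eyes/ears); the coordinates are made up purely to exercise the "fall"
# branch, where head and feet end up at nearly the same height relative to the
# torso length.
def _demo_action_recognition():
    kps = np.zeros((18, 3))
    kps[:, 2] = 1.0                      # give every joint a confident score
    kps[1] = [100, 100, 1.0]             # neck / throat
    kps[8] = kps[11] = [100, 150, 1.0]   # hips -> torso length of 50 pixels
    kps[9] = kps[12] = [130, 155, 1.0]   # knees
    kps[10] = kps[13] = [160, 160, 1.0]  # feet
    kps[0] = [140, 140, 1.0]             # nose
    kps[14:18] = [140, 140, 1.0]         # eyes and ears at the same height
    # foot height (160) minus head height (140) is well below the torso length
    # (50), i.e. the body is roughly horizontal, so a fall is reported
    assert action_recgnition(kps) == 'fall'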
def openpose_video(video_name, display=None):
from open_pose.pose_estimation import get_model, get_keypoints
op_model = get_model(tracking=1)
cap = cv2.VideoCapture(video_name)
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
ret, frame = cap.read()
frame, W, H = resize_img(frame)
H,W = frame.shape[:2]
output_fps = cap.get(cv2.CAP_PROP_FPS)
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    # Note: frames are read as (height, width, 3), but the VideoWriter size must be given as (width, height)
out = cv2.VideoWriter('result.mp4',fourcc, output_fps, (W,H))
    # CAP_PROP_FRAME_COUNT can be 0 or unreliable for some inputs; fall back
    # to a large frame cap so the processing loop below still runs
    if length<10:
        length = 10000
for i in tqdm(range(length-1)):
_, frame = cap.read()
frame, W, H = resize_img(frame)
kps = get_keypoints(op_model, frame)
kps = convert18(kps)
# if i>=40 and i%5==0:
# pdb()
try:
action_text=action_recgnition(kps[0])
print('Action Text is ', action_text)
if display:
img = draw_img18(frame, kps, 1, 3, text=action_text)
out.write(img)
except Exception as e:
print(e)
if __name__ == "__main__":
video_name = os.path.join(os.environ.get("CVTOOLBOX"), 'data/test.mp4')
parser = ArgumentParser()
parser.add_argument("-i", "--video_input", help="input video file name", default="/home/xyliu/Videos/sports/dance.mp4")
args = parser.parse_args()
video_name = args.video_input
openpose_video(video_name, 1) #52 FPS (2 GPU)
| 32.634783 | 123 | 0.617373 |
4759192cad01dd49c5e7d84c97428627bc1bb5b4
| 963 |
py
|
Python
|
aa_comp.py
|
victoria-rees/learning_python
|
0ddb2044632b12b093d45dcc257458edf49d3d4f
|
[
"MIT"
] | null | null | null |
aa_comp.py
|
victoria-rees/learning_python
|
0ddb2044632b12b093d45dcc257458edf49d3d4f
|
[
"MIT"
] | null | null | null |
aa_comp.py
|
victoria-rees/learning_python
|
0ddb2044632b12b093d45dcc257458edf49d3d4f
|
[
"MIT"
] | null | null | null |
# Make a program that reports the amino acid composition in a file of proteins
import mcb185
import sys
count = {}
total = 0
for name, seq in mcb185.read_fasta(sys.argv[1]):
for aa in (seq):
if aa == '*': continue
if aa in count: count[aa] += 1
else: count[aa] = 1
total += 1
for aa in count:
print(aa, count[aa], count[aa]/total)
"""
python3 aa_comp.py at_prots.fa | sort -nk2
W 528 0.012054244098442994
C 801 0.018286836217524315
H 1041 0.023766038080452946
M 1097 0.025044518515136296
Y 1281 0.02924523994338158
Q 1509 0.03445048171316378
F 1842 0.04205287429797726
N 1884 0.04301173462398977
P 2051 0.046824345920277614
T 2153 0.04915300671202228
R 2320 0.05296561800831012
I 2356 0.05378749828774942
D 2573 0.05874160997214739
G 2732 0.06237158120633761
A 2772 0.06328478151682572
K 2910 0.06643532258800967
E 2989 0.06823889320122369
V 3001 0.06851285329437012
L 3950 0.09017853066070042
S 4012 0.09159399114195699
"""
| 17.509091 | 78 | 0.757009 |
16339cc7f9c1e6097705a3a275be673a60253423
| 2,528 |
py
|
Python
|
examples/low_order_moms_dense_batch.py
|
agorshk/daal4py
|
58a9b2301c47cd2d5144a403a59c210e10b75f8f
|
[
"Apache-2.0"
] | null | null | null |
examples/low_order_moms_dense_batch.py
|
agorshk/daal4py
|
58a9b2301c47cd2d5144a403a59c210e10b75f8f
|
[
"Apache-2.0"
] | null | null | null |
examples/low_order_moms_dense_batch.py
|
agorshk/daal4py
|
58a9b2301c47cd2d5144a403a59c210e10b75f8f
|
[
"Apache-2.0"
] | null | null | null |
#*******************************************************************************
# Copyright 2014-2020 Intel Corporation
# All Rights Reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License"), the following terms apply:
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#*******************************************************************************
# daal4py low order moments example for shared memory systems
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
import pandas
read_csv = lambda f, c, t=np.float64: pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except:
# fall back to numpy loadtxt
read_csv = lambda f, c, t=np.float64: np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
def main(readcsv=read_csv, method="defaultDense"):
# read data from file
file = "./data/batch/covcormoments_dense.csv"
data = readcsv(file, range(10))
# compute
alg = d4p.low_order_moments(method=method)
res = alg.compute(data)
# result provides minimum, maximum, sum, sumSquares, sumSquaresCentered,
# mean, secondOrderRawMoment, variance, standardDeviation, variation
assert(all(getattr(res, name).shape==(1, data.shape[1]) for name in
['minimum', 'maximum', 'sum', 'sumSquares', 'sumSquaresCentered', 'mean',
'secondOrderRawMoment', 'variance', 'standardDeviation', 'variation']))
return res
if __name__ == "__main__":
res = main()
# print results
print("\nMinimum:\n", res.minimum)
print("\nMaximum:\n", res.maximum)
print("\nSum:\n", res.sum)
print("\nSum of squares:\n", res.sumSquares)
print("\nSum of squared difference from the means:\n", res.sumSquaresCentered)
print("\nMean:\n", res.mean)
print("\nSecond order raw moment:\n", res.secondOrderRawMoment)
print("\nVariance:\n", res.variance)
print("\nStandard deviation:\n", res.standardDeviation)
print("\nVariation:\n", res.variation)
print('All looks good!')
| 38.892308 | 109 | 0.640823 |
0ce1ea3469be9f6ffdf563b8f67fa374158ffb04
| 1,110 |
py
|
Python
|
app/api/model/vote.py
|
ChegeBryan/politico
|
746ef4c76931928ef145593092c8b391421a50fd
|
[
"MIT"
] | 1 |
2021-09-08T13:17:03.000Z
|
2021-09-08T13:17:03.000Z
|
app/api/model/vote.py
|
ChegeBryan/politico
|
746ef4c76931928ef145593092c8b391421a50fd
|
[
"MIT"
] | 62 |
2019-02-04T07:08:32.000Z
|
2021-05-06T19:49:03.000Z
|
app/api/model/vote.py
|
ChegeBryan/politico
|
746ef4c76931928ef145593092c8b391421a50fd
|
[
"MIT"
] | 5 |
2019-02-11T18:21:14.000Z
|
2022-02-25T07:41:07.000Z
|
""" Vote database model and methods """
import datetime as dt
class Vote:
""" Vote class for and methods for database manipulation """
def __init__(self, office, candidate):
self.office = office
self.candidate = candidate
self.created_on = dt.datetime.now()
def add_vote(self, user):
"""SQL query to insert a vote to database
Args:
user (integer): user who made the vote
"""
query = """INSERT INTO
votes(office_id, candidate_id, created_by, created_on)
VALUES(%s, %s, %s, %s);
"""
values = (self.office, self.candidate, user, self.created_on)
return query, values
@staticmethod
def get_cast_vote(user_id, office_id):
"""SQL query to return vote cast by user for particular office
Args:
user_id (integer): id of user who made the vote
office_id (integer): office user voted for
"""
sql = """SELECT * FROM votes WHERE office_id=%s AND created_by=%s;"""
query = sql, (office_id, user_id)
return query
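    # --- Illustrative usage (added commentary, not part of the original model) ---
    @staticmethod
    def _example_usage(cursor, user_id, office_id, candidate_id):
        """Sketch of how the (query, values) pairs built above are typically
        consumed with a DB-API cursor (e.g. psycopg2, which matches the %s
        placeholders); the cursor object and the ids passed in here are
        assumptions, not something this module provides."""
        vote = Vote(office_id, candidate_id)
        query, values = vote.add_vote(user_id)
        cursor.execute(query, values)                       # insert the vote
        cursor.execute(*Vote.get_cast_vote(user_id, office_id))
        return cursor.fetchone()                            # the vote just cast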
| 27.75 | 77 | 0.595495 |
bc1e2ddde81d9c3cafd73d62e6785b87568c7ff4
| 4,971 |
py
|
Python
|
docs/conf.py
|
pacilab/Longbow
|
efd5eaaf35a812415d15760cde07f3ca274d6a96
|
[
"BSD-3-Clause"
] | 17 |
2017-02-07T09:48:23.000Z
|
2021-12-23T17:56:47.000Z
|
docs/conf.py
|
pacilab/Longbow
|
efd5eaaf35a812415d15760cde07f3ca274d6a96
|
[
"BSD-3-Clause"
] | 130 |
2016-09-30T09:12:31.000Z
|
2019-08-09T13:37:02.000Z
|
docs/conf.py
|
jimboid/Longbow
|
626dba54180078360b2f3ffd2c7290688d78f4be
|
[
"BSD-3-Clause"
] | 5 |
2018-04-17T07:52:13.000Z
|
2021-05-28T18:05:30.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../longbow'))
# -- Project information -----------------------------------------------------
project = u'Longbow'
copyright = u'2018, James T. Gebbie-Rayet, Gareth B. Shannon, Charles A. Laughton'
author = u'James T. Gebbie-Rayet, Gareth B. Shannon, Charles A. Laughton'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Longbowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Longbow.tex', u'Longbow Documentation',
u'James T. Gebbie-Rayet, Gareth B. Shannon, Charles A. Laughton', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'longbow', u'Longbow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Longbow', u'Longbow Documentation',
author, 'Longbow', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
app.add_stylesheet('table-overflow.css')
| 30.875776 | 82 | 0.659022 |
db6fd4b718109771b009351fa3f6bd6fe4671d28
| 4,096 |
py
|
Python
|
test/aaa_profiling/test_misc.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 1 |
2020-12-09T21:56:16.000Z
|
2020-12-09T21:56:16.000Z
|
test/aaa_profiling/test_misc.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 1 |
2021-01-25T09:53:34.000Z
|
2021-01-25T09:53:35.000Z
|
test/aaa_profiling/test_misc.py
|
Dreamsorcerer/sqlalchemy
|
153671df9d4cd7f2cdb3e14e6221f529269885d9
|
[
"MIT"
] | 2 |
2021-01-10T10:49:52.000Z
|
2021-01-13T09:34:27.000Z
|
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy.orm import join as ormjoin
from sqlalchemy.orm import mapper
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import profiling
from sqlalchemy.util import classproperty
class EnumTest(fixtures.TestBase):
__requires__ = ("cpython", "python_profiling_backend")
def setup(self):
class SomeEnum(object):
# Implements PEP 435 in the minimal fashion needed by SQLAlchemy
_members = {}
@classproperty
def __members__(cls):
"""simulate a very expensive ``__members__`` getter"""
for i in range(10):
x = {}
x.update({k: v for k, v in cls._members.items()}.copy())
return x.copy()
def __init__(self, name, value):
self.name = name
self.value = value
self._members[name] = self
setattr(self.__class__, name, self)
for i in range(400):
SomeEnum("some%d" % i, i)
self.SomeEnum = SomeEnum
@profiling.function_call_count()
def test_create_enum_from_pep_435_w_expensive_members(self):
Enum(self.SomeEnum)
class CacheKeyTest(fixtures.TestBase):
# python3 is just to have less variability in test counts
__requires__ = ("cpython", "python_profiling_backend", "python3")
@testing.fixture(scope="class")
def mapping_fixture(self):
        # note: in order to work nicely with "fixture" we are building up
        # a whole new model of setup/teardown, since pytest "fixture"
        # sort of purposely works badly with setup/teardown
metadata = MetaData()
parent = Table(
"parent",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(20)),
)
child = Table(
"child",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(20)),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
class Parent(testing.entities.BasicEntity):
pass
class Child(testing.entities.BasicEntity):
pass
mapper(
Parent,
parent,
properties={"children": relationship(Child, backref="parent")},
)
mapper(Child, child)
return Parent, Child
@testing.fixture(scope="function")
def stmt_fixture_one(self, mapping_fixture):
# note that by using ORM elements we will have annotations in these
# items also which is part of the performance hit
Parent, Child = mapping_fixture
return [
(
select(Parent.id, Child.id)
.select_from(ormjoin(Parent, Child, Parent.children))
.where(Child.id == 5)
)
for i in range(100)
]
@profiling.function_call_count(variance=0.15, warmup=2)
def test_statement_key_is_cached(self, stmt_fixture_one):
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
assert key is not None
if current_key:
eq_(key, current_key)
else:
current_key = key
@profiling.function_call_count(variance=0.15, warmup=0)
def test_statement_key_is_not_cached(self, stmt_fixture_one):
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
assert key is not None
if current_key:
eq_(key, current_key)
else:
current_key = key
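# Standalone illustration (added commentary, not part of the original test
# module) of the property the loops above rely on: statements constructed the
# same way produce equal cache keys. This uses plain Core constructs instead of
# the ORM fixtures.
def _demo_cache_key_equality():
    from sqlalchemy import column
    stmt_a = select(column("x")).where(column("x") == 5)
    stmt_b = select(column("x")).where(column("x") == 5)
    # Two separately constructed but structurally identical statements yield
    # equal cache keys, which is what the test asserts for the 100 ORM
    # statements in stmt_fixture_one.
    assert stmt_a._generate_cache_key() == stmt_b._generate_cache_key()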
| 31.507692 | 77 | 0.601807 |
c1d0441d107e61b91a9b02a73008f9f393e15db8
| 21,570 |
py
|
Python
|
envs/robosuite/robosuite/environments/manipulation/two_arm_peg_in_hole.py
|
hzm2016/assistive-gym-robosuite
|
5c529f4444cc386383618bfa584341740a8468f9
|
[
"MIT"
] | 1 |
2021-11-22T07:45:28.000Z
|
2021-11-22T07:45:28.000Z
|
envs/robosuite/robosuite/environments/manipulation/two_arm_peg_in_hole.py
|
hzm2016/assistive-gym-robosuite
|
5c529f4444cc386383618bfa584341740a8468f9
|
[
"MIT"
] | null | null | null |
envs/robosuite/robosuite/environments/manipulation/two_arm_peg_in_hole.py
|
hzm2016/assistive-gym-robosuite
|
5c529f4444cc386383618bfa584341740a8468f9
|
[
"MIT"
] | null | null | null |
import numpy as np
from envs.robosuite.robosuite.utils import transform_utils as T
from envs.robosuite.robosuite.environments.manipulation.two_arm_env import TwoArmEnv
from envs.robosuite.robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements
from envs.robosuite.robosuite.utils.observables import Observable, sensor
from envs.robosuite.robosuite.models.objects import CylinderObject, PlateWithHoleObject
from envs.robosuite.robosuite.models.arenas import EmptyArena
from envs.robosuite.robosuite.models.tasks import ManipulationTask
class TwoArmPegInHole(TwoArmEnv):
"""
This class corresponds to the peg-in-hole task for two robot arms.
Args:
robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
(e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
Note: Must be either 2 single single-arm robots or 1 bimanual robot!
env_configuration (str): Specifies how to position the robots within the environment. Can be either:
:`'bimanual'`: Only applicable for bimanual robot setups. Sets up the (single) bimanual robot on the -x
side of the table
:`'single-arm-parallel'`: Only applicable for multi single arm setups. Sets up the (two) single armed
robots next to each other on the -x side of the table
:`'single-arm-opposed'`: Only applicable for multi single arm setups. Sets up the (two) single armed
robots opposed from each others on the opposite +/-y sides of the table.
Note that "default" corresponds to either "bimanual" if a bimanual robot is used or "single-arm-opposed" if two
single-arm robots are used.
controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
custom controller. Else, uses the default controller for this specific task. Should either be single
dict if same controller is to be used for all robots or else it should be a list of the same length as
"robots" param
gripper_types (str or list of str): type of gripper, used to instantiate gripper models from gripper factory.
For this environment, setting a value other than the default (None) will raise an AssertionError, as
this environment is not meant to be used with any gripper at all.
initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
The expected keys and corresponding value types are specified below:
:`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
:`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"
Should either be single dict if same noise value is to be used for all robots or else it should be a
list of the same length as "robots" param
:Note: Specifying "default" will automatically use the default noise settings.
Specifying None will automatically create the required dict with "magnitude" set to 0.0.
use_camera_obs (bool or list of bool): if True, every observation for a specific robot includes a rendered
image. Should either be single bool if camera obs value is to be used for all
robots or else it should be a list of the same length as "robots" param
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_scale (None or float): Scales the normalized reward function by the amount specified.
If None, environment reward remains unnormalized
reward_shaping (bool): if True, use dense rewards.
peg_radius (2-tuple): low and high limits of the (uniformly sampled)
radius of the peg
peg_length (float): length of the peg
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering
render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
will result in the default angle being applied, which is useful as it can be dragged / panned by
the user using the mouse
render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.
render_gpu_device_id (int): corresponds to the GPU device id to use for offscreen rendering.
Defaults to -1, in which case the device will be inferred from environment variables
(GPUS or CUDA_VISIBLE_DEVICES).
control_freq (float): how many control signals to receive in every second. This sets the amount of
simulation time that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
only calls sim.reset and resets all robosuite-internal variables
camera_names (str or list of str): name of camera to be rendered. Should either be single str if
same name is to be used for all cameras' rendering or else it should be a list of cameras to render.
:Note: At least one camera must be specified if @use_camera_obs is True.
:Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
robot's camera list).
camera_heights (int or list of int): height of camera frame. Should either be single int if
same height is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_widths (int or list of int): width of camera frame. Should either be single int if
same width is to be used for all cameras' frames or else it should be a list of the same length as
"camera names" param.
camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
"camera names" param.
Raises:
AssertionError: [Gripper specified]
ValueError: [Invalid number of robots specified]
ValueError: [Invalid env configuration]
ValueError: [Invalid robots for specified env configuration]
"""
def __init__(
self,
robots,
env_configuration="default",
controller_configs=None,
gripper_types=None,
initialization_noise="default",
use_camera_obs=True,
use_object_obs=True,
reward_scale=1.0,
reward_shaping=False,
peg_radius=(0.015, 0.03),
peg_length=0.13,
has_renderer=False,
has_offscreen_renderer=True,
render_camera="frontview",
render_collision_mesh=False,
render_visual_mesh=True,
render_gpu_device_id=-1,
control_freq=20,
horizon=1000,
ignore_done=False,
hard_reset=True,
camera_names="agentview",
camera_heights=256,
camera_widths=256,
camera_depths=False,
):
# Assert that the gripper type is None
assert gripper_types is None, "Tried to specify gripper other than None in TwoArmPegInHole environment!"
# reward configuration
self.reward_scale = reward_scale
self.reward_shaping = reward_shaping
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
# Save peg specs
self.peg_radius = peg_radius
self.peg_length = peg_length
super().__init__(
robots=robots,
env_configuration=env_configuration,
controller_configs=controller_configs,
mount_types="default",
gripper_types=gripper_types,
initialization_noise=initialization_noise,
use_camera_obs=use_camera_obs,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_camera=render_camera,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
render_gpu_device_id=render_gpu_device_id,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
hard_reset=hard_reset,
camera_names=camera_names,
camera_heights=camera_heights,
camera_widths=camera_widths,
camera_depths=camera_depths,
)
def reward(self, action=None):
"""
Reward function for the task.
Sparse un-normalized reward:
- a discrete reward of 5.0 is provided if the peg is inside the plate's hole
- Note that we enforce that it's inside at an appropriate angle (cos(theta) > 0.95).
Un-normalized summed components if using reward shaping:
- Reaching: in [0, 1], to encourage the arms to approach each other
- Perpendicular Distance: in [0,1], to encourage the arms to approach each other
- Parallel Distance: in [0,1], to encourage the arms to approach each other
- Alignment: in [0, 1], to encourage having the right orientation between the peg and hole.
- Placement: in {0, 1}, nonzero if the peg is in the hole with a relatively correct alignment
Note that the final reward is normalized and scaled by reward_scale / 5.0 as
well so that the max score is equal to reward_scale
"""
reward = 0
# Right location and angle
if self._check_success():
reward = 1.0
# use a shaping reward
if self.reward_shaping:
# Grab relevant values
t, d, cos = self._compute_orientation()
# reaching reward
hole_pos = self.sim.data.body_xpos[self.hole_body_id]
gripper_site_pos = self.sim.data.body_xpos[self.peg_body_id]
dist = np.linalg.norm(gripper_site_pos - hole_pos)
reaching_reward = 1 - np.tanh(1.0 * dist)
reward += reaching_reward
# Orientation reward
reward += 1 - np.tanh(d)
reward += 1 - np.tanh(np.abs(t))
reward += cos
# if we're not reward shaping, scale sparse reward so that the max reward is identical to its dense version
else:
reward *= 5.0
if self.reward_scale is not None:
reward *= self.reward_scale / 5.0
return reward
def _load_model(self):
"""
Loads an xml model, puts it in self.model
"""
super()._load_model()
# Adjust base pose(s) accordingly
if self.env_configuration == "bimanual":
xpos = self.robots[0].robot_model.base_xpos_offset["empty"]
self.robots[0].robot_model.set_base_xpos(xpos)
else:
if self.env_configuration == "single-arm-opposed":
# Set up robots facing towards each other by rotating them from their default position
for robot, rotation in zip(self.robots, (np.pi / 2, -np.pi / 2)):
xpos = robot.robot_model.base_xpos_offset["empty"]
rot = np.array((0, 0, rotation))
xpos = T.euler2mat(rot) @ np.array(xpos)
robot.robot_model.set_base_xpos(xpos)
robot.robot_model.set_base_ori(rot)
else: # "single-arm-parallel" configuration setting
# Set up robots parallel to each other but offset from the center
for robot, offset in zip(self.robots, (-0.25, 0.25)):
xpos = robot.robot_model.base_xpos_offset["empty"]
xpos = np.array(xpos) + np.array((0, offset, 0))
robot.robot_model.set_base_xpos(xpos)
# Add arena and robot
mujoco_arena = EmptyArena()
# Arena always gets set to zero origin
mujoco_arena.set_origin([0, 0, 0])
# Modify default agentview camera
mujoco_arena.set_camera(
camera_name="agentview",
pos=[1.0666432116509934, 1.4903257668114777e-08, 2.0563394967349096],
quat=[0.6530979871749878, 0.27104058861732483, 0.27104055881500244, 0.6530978679656982]
)
# initialize objects of interest
self.hole = PlateWithHoleObject(name="hole")
tex_attrib = {
"type": "cube",
}
mat_attrib = {
"texrepeat": "1 1",
"specular": "0.4",
"shininess": "0.1",
}
greenwood = CustomMaterial(
texture="WoodGreen",
tex_name="greenwood",
mat_name="greenwood_mat",
tex_attrib=tex_attrib,
mat_attrib=mat_attrib,
)
self.peg = CylinderObject(
name="peg",
size_min=(self.peg_radius[0], self.peg_length),
size_max=(self.peg_radius[1], self.peg_length),
material=greenwood,
rgba=[0, 1, 0, 1],
joints=None,
)
# Load hole object
hole_obj = self.hole.get_obj()
hole_obj.set("quat", "0 0 0.707 0.707")
hole_obj.set("pos", "0.11 0 0.17")
# Load peg object
peg_obj = self.peg.get_obj()
peg_obj.set("pos", array_to_string((0, 0, self.peg_length)))
# Append appropriate objects to arms
if self.env_configuration == "bimanual":
r_eef, l_eef = [self.robots[0].robot_model.eef_name[arm] for arm in self.robots[0].arms]
r_model, l_model = [self.robots[0].robot_model, self.robots[0].robot_model]
else:
r_eef, l_eef = [robot.robot_model.eef_name for robot in self.robots]
r_model, l_model = [self.robots[0].robot_model, self.robots[1].robot_model]
r_body = find_elements(root=r_model.worldbody, tags="body", attribs={"name": r_eef}, return_first=True)
l_body = find_elements(root=l_model.worldbody, tags="body", attribs={"name": l_eef}, return_first=True)
r_body.append(peg_obj)
l_body.append(hole_obj)
# task includes arena, robot, and objects of interest
# We don't add peg and hole directly since they were already appended to the robots
self.model = ManipulationTask(
mujoco_arena=mujoco_arena,
mujoco_robots=[robot.robot_model for robot in self.robots],
)
# Make sure to add relevant assets from peg and hole objects
self.model.merge_assets(self.hole)
self.model.merge_assets(self.peg)
def _setup_references(self):
"""
Sets up references to important components. A reference is typically an
index or a list of indices that point to the corresponding elements
in a flatten array, which is how MuJoCo stores physical simulation data.
"""
super()._setup_references()
# Additional object references from this env
self.hole_body_id = self.sim.model.body_name2id(self.hole.root_body)
self.peg_body_id = self.sim.model.body_name2id(self.peg.root_body)
def _setup_observables(self):
"""
Sets up observables to be used for this environment. Creates object-based observables if enabled
Returns:
OrderedDict: Dictionary mapping observable names to its corresponding Observable object
"""
observables = super()._setup_observables()
# low-level object information
if self.use_object_obs:
# Get robot prefix and define observables modality
if self.env_configuration == "bimanual":
pf0 = self.robots[0].robot_model.naming_prefix + "right_"
pf1 = self.robots[0].robot_model.naming_prefix + "left_"
else:
pf0 = self.robots[0].robot_model.naming_prefix
pf1 = self.robots[1].robot_model.naming_prefix
modality = "object"
# position and rotation of peg and hole
@sensor(modality=modality)
def hole_pos(obs_cache):
return np.array(self.sim.data.body_xpos[self.hole_body_id])
@sensor(modality=modality)
def hole_quat(obs_cache):
return T.convert_quat(self.sim.data.body_xquat[self.hole_body_id], to="xyzw")
@sensor(modality=modality)
def peg_to_hole(obs_cache):
return obs_cache["hole_pos"] - np.array(self.sim.data.body_xpos[self.peg_body_id]) if \
"hole_pos" in obs_cache else np.zeros(3)
@sensor(modality=modality)
def peg_quat(obs_cache):
return T.convert_quat(self.sim.data.body_xquat[self.peg_body_id], to="xyzw")
# Relative orientation parameters
@sensor(modality=modality)
def angle(obs_cache):
t, d, cos = self._compute_orientation()
obs_cache["t"] = t
obs_cache["d"] = d
return cos
@sensor(modality=modality)
def t(obs_cache):
return obs_cache["t"] if "t" in obs_cache else 0.0
@sensor(modality=modality)
def d(obs_cache):
return obs_cache["d"] if "d" in obs_cache else 0.0
sensors = [hole_pos, hole_quat, peg_to_hole, peg_quat, angle, t, d]
names = [s.__name__ for s in sensors]
# Create observables
for name, s in zip(names, sensors):
observables[name] = Observable(
name=name,
sensor=s,
sampling_rate=self.control_freq,
)
return observables
def _reset_internal(self):
"""
Resets simulation internal configurations.
"""
super()._reset_internal()
def _check_success(self):
"""
Check if peg is successfully aligned and placed within the hole
Returns:
bool: True if peg is placed in hole correctly
"""
t, d, cos = self._compute_orientation()
return d < 0.06 and -0.12 <= t <= 0.14 and cos > 0.95
def _compute_orientation(self):
"""
Helper function to return the relative positions between the hole and the peg.
In particular, the intersection of the line defined by the peg and the plane
defined by the hole is computed; the parallel distance, perpendicular distance,
and angle are returned.
Returns:
3-tuple:
- (float): parallel distance
- (float): perpendicular distance
- (float): angle
"""
peg_mat = self.sim.data.body_xmat[self.peg_body_id]
peg_mat.shape = (3, 3)
peg_pos = self.sim.data.body_xpos[self.peg_body_id]
hole_pos = self.sim.data.body_xpos[self.hole_body_id]
hole_mat = self.sim.data.body_xmat[self.hole_body_id]
hole_mat.shape = (3, 3)
v = peg_mat @ np.array([0, 0, 1])
v = v / np.linalg.norm(v)
center = hole_pos + hole_mat @ np.array([0.1, 0, 0])
t = (center - peg_pos) @ v / (np.linalg.norm(v) ** 2)
d = np.linalg.norm(np.cross(v, peg_pos - center)) / np.linalg.norm(v)
hole_normal = hole_mat @ np.array([0, 0, 1])
return (
t,
d,
abs(np.dot(hole_normal, v) / np.linalg.norm(hole_normal) / np.linalg.norm(v)),
)
def _peg_pose_in_hole_frame(self):
"""
A helper function that takes in a named data field and returns the pose of that
object in the base frame.
Returns:
np.array: (4,4) matrix corresponding to the pose of the peg in the hole frame
"""
# World frame
peg_pos_in_world = self.sim.data.get_body_xpos(self.peg.root_body)
peg_rot_in_world = self.sim.data.get_body_xmat(self.peg.root_body).reshape((3, 3))
peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)
# World frame
hole_pos_in_world = self.sim.data.get_body_xpos(self.hole.root_body)
hole_rot_in_world = self.sim.data.get_body_xmat(self.hole.root_body).reshape((3, 3))
hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)
world_pose_in_hole = T.pose_inv(hole_pose_in_world)
peg_pose_in_hole = T.pose_in_A_to_pose_in_B(
peg_pose_in_world, world_pose_in_hole
)
return peg_pose_in_hole
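# Standalone numeric illustration of the geometry computed in
# _compute_orientation above (added commentary, not used by the environment):
# the peg is treated as a line through `peg_pos` with direction `v`, and the
# hole as a point `center` with normal `hole_normal`; t is the signed distance
# to that point along the peg axis, d the perpendicular distance of the line
# from it, and cos the alignment between peg axis and hole normal. The numbers
# below are made up for the sketch.
def _demo_peg_hole_geometry():
    v = np.array([0.0, 0.0, 1.0])              # peg axis, already unit length
    peg_pos = np.array([0.0, 0.02, 0.10])      # a point on the peg
    center = np.array([0.0, 0.0, 0.20])        # point on the hole plane
    hole_normal = np.array([0.0, 0.0, 1.0])    # hole axis
    t = (center - peg_pos) @ v / (np.linalg.norm(v) ** 2)
    d = np.linalg.norm(np.cross(v, peg_pos - center)) / np.linalg.norm(v)
    cos = abs(np.dot(hole_normal, v) / (np.linalg.norm(hole_normal) * np.linalg.norm(v)))
    # t = 0.10 (hole plane 10 cm ahead along the peg axis), d = 0.02 (2 cm
    # off-axis) and cos = 1.0 (perfectly aligned), so this pose would satisfy
    # the d < 0.06 and cos > 0.95 conditions used in _check_success
    return t, d, cos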
| 43.053892 | 119 | 0.632267 |
287d99c740ffac16d24b46065afb2245cff530c2
| 8,180 |
py
|
Python
|
sdk/identity/azure-identity/azure/identity/_credentials/default.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2 |
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/identity/azure-identity/azure/identity/_credentials/default.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4 |
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/identity/azure-identity/azure/identity/_credentials/default.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 1 |
2019-04-05T18:17:43.000Z
|
2019-04-05T18:17:43.000Z
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import os
from .._constants import EnvironmentVariables
from .._internal import get_default_authority, normalize_authority
from .browser import InteractiveBrowserCredential
from .chained import ChainedTokenCredential
from .environment import EnvironmentCredential
from .managed_identity import ManagedIdentityCredential
from .shared_cache import SharedTokenCacheCredential
from .azure_cli import AzureCliCredential
from .vscode import VisualStudioCodeCredential
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, List
from azure.core.credentials import AccessToken, TokenCredential
_LOGGER = logging.getLogger(__name__)
class DefaultAzureCredential(ChainedTokenCredential):
"""A default credential capable of handling most Azure SDK authentication scenarios.
The identity it uses depends on the environment. When an access token is needed, it requests one using these
identities in turn, stopping when one provides a token:
1. A service principal configured by environment variables. See :class:`~azure.identity.EnvironmentCredential` for
more details.
2. An Azure managed identity. See :class:`~azure.identity.ManagedIdentityCredential` for more details.
3. On Windows only: a user who has signed in with a Microsoft application, such as Visual Studio. If multiple
identities are in the cache, then the value of the environment variable ``AZURE_USERNAME`` is used to select
which identity to use. See :class:`~azure.identity.SharedTokenCacheCredential` for more details.
4. The user currently signed in to Visual Studio Code.
5. The identity currently logged in to the Azure CLI.
This default behavior is configurable with keyword arguments.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud.
:keyword bool exclude_cli_credential: Whether to exclude the Azure CLI from the credential. Defaults to **False**.
:keyword bool exclude_environment_credential: Whether to exclude a service principal configured by environment
variables from the credential. Defaults to **False**.
:keyword bool exclude_managed_identity_credential: Whether to exclude managed identity from the credential.
Defaults to **False**.
:keyword bool exclude_visual_studio_code_credential: Whether to exclude stored credential from VS Code.
Defaults to **False**.
:keyword bool exclude_shared_token_cache_credential: Whether to exclude the shared token cache. Defaults to
**False**.
:keyword bool exclude_interactive_browser_credential: Whether to exclude interactive browser authentication (see
:class:`~azure.identity.InteractiveBrowserCredential`). Defaults to **True**.
:keyword str interactive_browser_tenant_id: Tenant ID to use when authenticating a user through
:class:`~azure.identity.InteractiveBrowserCredential`. Defaults to the value of environment variable
AZURE_TENANT_ID, if any. If unspecified, users will authenticate in their home tenants.
:keyword str managed_identity_client_id: The client ID of a user-assigned managed identity. Defaults to the value
of the environment variable AZURE_CLIENT_ID, if any. If not specified, a system-assigned identity will be used.
:keyword str shared_cache_username: Preferred username for :class:`~azure.identity.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_USERNAME, if any.
:keyword str shared_cache_tenant_id: Preferred tenant for :class:`~azure.identity.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_TENANT_ID, if any.
:keyword str visual_studio_code_tenant_id: Tenant ID to use when authenticating with
:class:`~azure.identity.VisualStudioCodeCredential`.
"""
def __init__(self, **kwargs):
# type: (**Any) -> None
authority = kwargs.pop("authority", None)
authority = normalize_authority(authority) if authority else get_default_authority()
interactive_browser_tenant_id = kwargs.pop(
"interactive_browser_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
managed_identity_client_id = kwargs.pop(
"managed_identity_client_id", os.environ.get(EnvironmentVariables.AZURE_CLIENT_ID)
)
shared_cache_username = kwargs.pop("shared_cache_username", os.environ.get(EnvironmentVariables.AZURE_USERNAME))
shared_cache_tenant_id = kwargs.pop(
"shared_cache_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
vscode_tenant_id = kwargs.pop(
"visual_studio_code_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
exclude_environment_credential = kwargs.pop("exclude_environment_credential", False)
exclude_managed_identity_credential = kwargs.pop("exclude_managed_identity_credential", False)
exclude_shared_token_cache_credential = kwargs.pop("exclude_shared_token_cache_credential", False)
exclude_visual_studio_code_credential = kwargs.pop("exclude_visual_studio_code_credential", False)
exclude_cli_credential = kwargs.pop("exclude_cli_credential", False)
exclude_interactive_browser_credential = kwargs.pop("exclude_interactive_browser_credential", True)
credentials = [] # type: List[TokenCredential]
if not exclude_environment_credential:
credentials.append(EnvironmentCredential(authority=authority, **kwargs))
if not exclude_managed_identity_credential:
credentials.append(ManagedIdentityCredential(client_id=managed_identity_client_id, **kwargs))
if not exclude_shared_token_cache_credential and SharedTokenCacheCredential.supported():
try:
# username and/or tenant_id are only required when the cache contains tokens for multiple identities
shared_cache = SharedTokenCacheCredential(
username=shared_cache_username, tenant_id=shared_cache_tenant_id, authority=authority, **kwargs
)
credentials.append(shared_cache)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.info("Shared token cache is unavailable: '%s'", ex)
if not exclude_visual_studio_code_credential:
credentials.append(VisualStudioCodeCredential(tenant_id=vscode_tenant_id))
if not exclude_cli_credential:
credentials.append(AzureCliCredential())
if not exclude_interactive_browser_credential:
credentials.append(InteractiveBrowserCredential(tenant_id=interactive_browser_tenant_id))
super(DefaultAzureCredential, self).__init__(*credentials)
def get_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The exception has a
`message` attribute listing each authentication attempt and its error message.
"""
if self._successful_credential:
token = self._successful_credential.get_token(*scopes, **kwargs)
_LOGGER.info(
"%s acquired a token from %s", self.__class__.__name__, self._successful_credential.__class__.__name__
)
return token
return super(DefaultAzureCredential, self).get_token(*scopes, **kwargs)
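def _example_default_credential_usage():
    """Illustrative usage of the class above (added commentary, not part of the
    SDK module). The scope string is the ARM default scope and the excluded
    credential is just one of the keyword options documented in the class
    docstring; adjust both for your own scenario."""
    credential = DefaultAzureCredential(exclude_interactive_browser_credential=True)
    # Azure SDK clients normally call get_token for you; done by hand it looks like this
    token = credential.get_token("https://management.azure.com/.default")
    return token.token, token.expires_on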
| 56.413793 | 120 | 0.741076 |
4138a57a394acc6f2b8252e25fb26b89e5f6ed92
| 855 |
py
|
Python
|
setup.py
|
piger/wikiquote2fortune
|
7ba2b5d076cbeb6c01dd6df8b5bd0936f130bb42
|
[
"BSD-3-Clause"
] | 1 |
2020-07-07T09:53:13.000Z
|
2020-07-07T09:53:13.000Z
|
setup.py
|
piger/wikiquote2fortune
|
7ba2b5d076cbeb6c01dd6df8b5bd0936f130bb42
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
piger/wikiquote2fortune
|
7ba2b5d076cbeb6c01dd6df8b5bd0936f130bb42
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
wikiquote2fortune
-----------------
wikiquote2fortune is a little script that can fetch a wikiquote page and extract
quotes to create a fortune(6) file.
"""
from setuptools import setup, find_packages
setup(
name='wikiquote2fortune',
version='0.2.0',
description="Create a fortune file from a wikiquote page",
author='Daniel Kertesz',
author_email='[email protected]',
url='https://github.com/piger/wikiquote2fortune',
license='BSD',
long_description=__doc__,
install_requires=[
'beautifulsoup4==4.10.0',
'Click==8.0.3',
'lxml==4.7.1',
'requests==2.26.0',
],
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'wikiquote2fortune = wikiquote2fortune.main:main',
],
},
)
| 23.75 | 80 | 0.626901 |
293f95cf75c69f485a369ae7b6d3c3d5647644ae
| 7,379 |
py
|
Python
|
tutorials/micro/micro_tflite.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | 2 |
2019-11-13T01:17:41.000Z
|
2020-05-15T19:06:52.000Z
|
tutorials/micro/micro_tflite.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | null | null | null |
tutorials/micro/micro_tflite.py
|
maxtnuk/incubator-tvm
|
050a836b18c419213f34b8ac76afced425d9d70e
|
[
"Apache-2.0"
] | 2 |
2020-11-26T00:35:02.000Z
|
2020-12-07T03:15:56.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Micro TVM with TFLite Models
============================
**Author**: `Tom Gall <https://github.com/tom-gall>`_
This tutorial is an introduction to working with MicroTVM and a TFLite
model with Relay.
"""
# %%
# Setup
# -----
#
# To get started, TFLite package needs to be installed as prerequisite.
#
# install tflite
#
# .. code-block:: bash
#
#     pip install tflite==2.1.0 --user
#
# or you could generate TFLite package yourself. The steps are the following:
#
# Get the flatc compiler.
# Please refer to https://github.com/google/flatbuffers for details
# and make sure it is properly installed.
#
# .. code-block:: bash
#
# flatc --version
#
# Get the TFLite schema.
#
# .. code-block:: bash
#
# wget https://raw.githubusercontent.com/tensorflow/tensorflow/r1.13/tensorflow/lite/schema/schema.fbs
#
# Generate TFLite package.
#
# .. code-block:: bash
#
# flatc --python schema.fbs
#
# Add the current folder (which contains generated tflite module) to PYTHONPATH.
#
# .. code-block:: bash
#
# export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$(pwd)
#
# To validate that the TFLite package was installed successfully, ``python -c "import tflite"``
#
# CMSIS needs to be downloaded and the CMSIS_ST_PATH environment variable setup
# This tutorial only supports the STM32F7xx series of boards.
# Download from : https://www.st.com/en/embedded-software/stm32cubef7.html
# After you've expanded the zip file
#
# .. code-block:: bash
#
# export CMSIS_ST_PATH=/path/to/STM32Cube_FW_F7_V1.16.0/Drivers/CMSIS
# %%
# Recreating your own Pre-Trained TFLite model
# --------------------------------------------
#
# The tutorial downloads a pretrained TFLite model. When working with microcontrollers
# you need to be mindful that these are highly resource-constrained devices, and as such
# standard models like MobileNet may not fit into their modest memory.
#
# For this tutorial, we'll make use of one of the TF Micro example models.
#
# If you wish to replicate the training steps see:
# https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/hello_world/train
#
# .. note::
#
# If you accidentally download the example pretrained model from:
# wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/micro/hello_world_2020_04_13.zip
# this will fail due to an unimplemented opcode (114)
import os
import numpy as np
import tvm
import tvm.micro as micro
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_runtime, utils
from tvm import relay
# %%
# Load and prepare the Pre-Trained Model
# --------------------------------------
#
# Load the pretrained TFLite model from a file in your current
# directory into a buffer
model_url = "https://people.linaro.org/~tom.gall/sine_model.tflite"
model_file = "sine_model.tflite"
model_path = download_testdata(model_url, model_file, module="data")
with open(model_path, "rb") as f:
    tflite_model_buf = f.read()
######################################################################
# Using the buffer, transform into a tflite model python object
try:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
######################################################################
# Print out the version of the model
version = tflite_model.Version()
print("Model Version: " + str(version))
######################################################################
# Parse the python model object to convert it into a relay module
# and weights.
# It is important to note that the input tensor name must match what
# is contained in the model.
#
# If you are unsure what that might be, this can be discovered by using
# the visualize.py script within the Tensorflow project.
# See : How do I inspect a .tflite file? `<https://www.tensorflow.org/lite/guide/faq>`_
input_tensor = "dense_4_input"
input_shape = (1,)
input_dtype = "float32"
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict={input_tensor: input_shape}, dtype_dict={input_tensor: input_dtype}
)
######################################################################
# Now we create a build config for Relay, turning off two options,
# and then call relay.build, which will result in a C source
# file.
#
# .. code-block:: python
#
TARGET = tvm.target.target.micro("host")
with tvm.transform.PassContext(
opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["FuseOps"]
):
graph, c_mod, c_params = relay.build(mod, target=TARGET, params=params)
# %%
# Running on simulated device
# ----------------------------------------------
#
# First, compile a static microTVM runtime for the targeted device. In this case, the host simulated
# device is used.
workspace = tvm.micro.Workspace()
compiler = tvm.micro.DefaultCompiler(target=TARGET)
opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, "host"))
micro_binary = tvm.micro.build_static_runtime(
# the x86 compiler *expects* you to give the exact same dictionary for both
# lib_opts and bin_opts. so the library compiler is mutating lib_opts and
# the binary compiler is expecting those mutations to be in bin_opts.
# TODO(weberlo) fix this very bizarre behavior
workspace,
compiler,
c_mod,
lib_opts=opts["bin_opts"],
bin_opts=opts["bin_opts"],
)
######################################################################
# Next, establish a session with the simulated device and run the
# computation. The `with session` line would typically flash an attached
# microcontroller, but in this tutorial, it simply launches a subprocess
# to stand in for an attached microcontroller.
#
# .. code-block:: python
#
flasher = compiler.flasher()
with tvm.micro.Session(binary=micro_binary, flasher=flasher) as session:
graph_mod = tvm.micro.create_local_graph_runtime(
graph, session.get_system_lib(), session.context
)
# Set the model parameters using the lowered parameters produced by `relay.build`.
graph_mod.set_input(**c_params)
# The model consumes a single float32 value and returns a predicted sine value. To pass the
# input value we construct a tvm.nd.array object with a single contrived number as input. For
# this model values of 0 to 2Pi are acceptable.
graph_mod.set_input(input_tensor, tvm.nd.array(np.array([0.5], dtype="float32")))
graph_mod.run()
tvm_output = graph_mod.get_output(0).asnumpy()
print("result is: " + str(tvm_output))
| 34.32093 | 112 | 0.693454 |
604591ca21cc6a047b625fedd8a51de2a90c6bc1
| 19,849 |
py
|
Python
|
lib/matplotlib/tests/test_collections.py
|
gatagat/matplotlib
|
b8ea343d33da614d199e80e6df047bca9b2f08a1
|
[
"MIT",
"BSD-3-Clause"
] | 1 |
2019-04-15T09:40:53.000Z
|
2019-04-15T09:40:53.000Z
|
lib/matplotlib/tests/test_collections.py
|
gatagat/matplotlib
|
b8ea343d33da614d199e80e6df047bca9b2f08a1
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
lib/matplotlib/tests/test_collections.py
|
gatagat/matplotlib
|
b8ea343d33da614d199e80e6df047bca9b2f08a1
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
Tests specific to the collections module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
from nose.tools import assert_equal
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import EventCollection
from matplotlib.testing.decorators import cleanup, image_comparison
def generate_EventCollection_plot():
'''
generate the initial collection and plot it
'''
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig = plt.figure()
splt = fig.add_subplot(1, 1, 1)
splt.add_collection(coll)
splt.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
splt.set_xlim(-1, 22)
splt.set_ylim(0, 2)
return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
'''
check to make sure the default segments have the correct coordinates
'''
_, coll, props = generate_EventCollection_plot()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
@cleanup
def test__EventCollection__get_positions():
'''
check to make sure the default positions match the input positions
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['positions'], coll.get_positions())
@cleanup
def test__EventCollection__get_orientation():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['orientation'], coll.get_orientation())
@cleanup
def test__EventCollection__is_horizontal():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(True, coll.is_horizontal())
@cleanup
def test__EventCollection__get_linelength():
'''
check to make sure the default linelength matches the input linelength
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['linelength'], coll.get_linelength())
@cleanup
def test__EventCollection__get_lineoffset():
'''
check to make sure the default lineoffset matches the input lineoffset
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['lineoffset'], coll.get_lineoffset())
@cleanup
def test__EventCollection__get_linestyle():
'''
check to make sure the default linestyle matches the input linestyle
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(coll.get_linestyle(), [(None, None)])
@cleanup
def test__EventCollection__get_color():
'''
check to make sure the default color matches the input color
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['color'], coll.get_color())
check_allprop_array(coll.get_colors(), props['color'])
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
'''
check to make sure set_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
'''
check to make sure add_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.add_positions(props['extra_positions'][0])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
'''
check to make sure append_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
'''
check to make sure extend_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
'''
check to make sure switch_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(
baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
'''
check to make sure calling switch_orientation twice sets the
orientation back to the default
'''
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert_equal(props['orientation'], coll.get_orientation())
assert_equal(True, coll.is_horizontal())
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
'''
check to make sure set_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
'''
check to make sure set_linelength works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert_equal(new_linelength, coll.get_linelength())
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
'''
check to make sure set_lineoffset works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert_equal(new_lineoffset, coll.get_lineoffset())
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = 'dashed'
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
'''
check to make sure set_linestyle accepts a single dash pattern
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = (0, (6., 6.))
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linewidth = 5
coll.set_linewidth(new_linewidth)
assert_equal(coll.get_linewidth(), new_linewidth)
splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
'''
check to make sure set_color works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
np.testing.assert_array_equal(new_color, coll.get_color())
check_allprop_array(coll.get_colors(), new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
'''
check to make sure all values in the segment are correct, given a
particular set of inputs
note: this is not a test, it is used by tests
'''
segments = coll.get_segments()
    if (orientation is None or orientation.lower() == 'horizontal'
            or orientation.lower() == 'none'):
        # if horizontal, the position is in the y-axis
pos1 = 1
pos2 = 0
elif orientation.lower() == 'vertical':
        # if vertical, the position is in the x-axis
pos1 = 0
pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert_equal(segment[0, pos1], lineoffset + linelength / 2.)
assert_equal(segment[1, pos1], lineoffset - linelength / 2.)
assert_equal(segment[0, pos2], positions[i])
assert_equal(segment[1, pos2], positions[i])
def check_allprop(values, target):
'''
check to make sure all values match the given target
note: this is not a test, it is used by tests
'''
for value in values:
assert_equal(value, target)
def check_allprop_array(values, target):
'''
check to make sure all values match the given target if arrays
note: this is not a test, it is used by tests
'''
for value in values:
np.testing.assert_array_equal(value, target)
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
@cleanup
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# Github issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert_equal(ax.dataLim.bounds, bounds)
@cleanup
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert_equal(q.get_datalim(ax.transData).bounds, (0., 0., 7., 9.))
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert_equal(ax.dataLim.bounds, (20.0, 30.0, 15.0, 6.0))
@cleanup
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
extensions=['png'],
remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X/float(x[-1])
hh = Y/float(y[-1])
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
extensions=['png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = Axes3D(fig)
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=xy, transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super(SquareCollection, self).__init__(
4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
# equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
@cleanup
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000])
fig.savefig(io.BytesIO(), dpi=fig.dpi)
class MouseEvent(object):
pass
event = MouseEvent()
event.x = 325
event.y = 240
found, indices = col.contains(event)
assert found
assert_array_equal(indices['ind'], [0])
@cleanup
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| 32.917081 | 79 | 0.644113 |
47fe98c2e9e934cf82a7e20835eea8e2bd9bb065
| 1,723 |
py
|
Python
|
configs/_base_/models/retinanet_r50_fpn.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 367 |
2022-01-14T03:32:25.000Z
|
2022-03-31T04:48:20.000Z
|
configs/_base_/models/retinanet_r50_fpn.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 170 |
2020-09-08T12:29:06.000Z
|
2022-03-31T18:28:09.000Z
|
configs/_base_/models/retinanet_r50_fpn.py
|
evgps/mmdetection_trashcan
|
aaf4237c2c0d473425cdc7b741d3009177b79751
|
[
"Apache-2.0"
] | 61 |
2021-07-30T07:51:41.000Z
|
2022-03-30T14:40:02.000Z
|
# model settings
model = dict(
type='RetinaNet',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_input',
num_outs=5),
bbox_head=dict(
type='RetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# training and testing settings
train_cfg=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
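# Illustrative note (not part of the original config): in mmdetection, a _base_ model
# file like this one is normally pulled into a full config via the `_base_` mechanism,
# e.g.
#   _base_ = [
#       '../_base_/models/retinanet_r50_fpn.py',
#       '../_base_/datasets/coco_detection.py',
#       '../_base_/schedules/schedule_1x.py',
#       '../_base_/default_runtime.py',
#   ]
# with task-specific overrides (e.g. num_classes, optimizer settings) applied on top of
# the `model` dict above.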
| 28.245902 | 56 | 0.525247 |
3497b3c1c96b5f1aa6c11c252dea98cce0f32d74
| 5,836 |
py
|
Python
|
plugins/openstack/pyparts/agent_exceptions.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 12 |
2020-06-02T14:22:40.000Z
|
2021-04-07T15:58:09.000Z
|
plugins/openstack/pyparts/agent_exceptions.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 72 |
2020-06-09T00:35:19.000Z
|
2021-09-29T11:00:41.000Z
|
plugins/openstack/pyparts/agent_exceptions.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | 43 |
2020-06-05T15:09:37.000Z
|
2021-09-25T12:28:28.000Z
|
import os
import re
from core import constants
from core.searchtools import SearchDef
from core.plugins.openstack import (
OpenstackEventChecksBase,
AGENT_ERROR_KEY_BY_TIME,
)
from core.searchtools import FileSearcher
YAML_PRIORITY = 7
class AgentExceptionChecks(OpenstackEventChecksBase):
def __init__(self):
        # NOTE: we subclass OpenstackEventChecksBase to get the call structure but
        # we don't currently use yaml to define our searches.
super().__init__(yaml_defs_group='agent-exceptions',
searchobj=FileSearcher())
# The following are expected to be WARNING
self._agent_warnings = {
'nova': ['MessagingTimeout',
'DiskNotFound',
],
'neutron': [r'OVS is dead',
r'MessagingTimeout',
]
}
# The following are expected to be ERROR. This is typically used to
# catch events that are not defined as an exception.
self._agent_errors = {
'neutron': [r'RuntimeError']
}
def _add_agent_searches(self, project, agent, data_source, expr_template):
if project.exceptions:
values = "(?:{})".format('|'.join(project.exceptions))
expr = expr_template.format(values)
hint = '( ERROR | Traceback)'
self.searchobj.add_search_term(SearchDef(expr, tag=agent,
hint=hint),
data_source)
warn_exprs = self._agent_warnings.get(project.name, [])
if warn_exprs:
values = "(?:{})".format('|'.join(warn_exprs))
expr = expr_template.format(values)
self.searchobj.add_search_term(SearchDef(expr, tag=agent,
hint='WARNING'),
data_source)
err_exprs = self._agent_errors.get(project.name, [])
if err_exprs:
expr = expr_template.format("(?:{})".
format('|'.join(err_exprs)))
sd = SearchDef(expr, tag=agent, hint='ERROR')
self.searchobj.add_search_term(sd, data_source)
def load(self):
"""Register searches for exceptions as well as any other type of issue
we might want to catch like warnings etc which may not be errors or
exceptions.
"""
for project in self.ost_projects.all.values():
# NOTE: services running under apache may have their logs (e.g.
# barbican-api.log) prepended with apache/mod_wsgi info so do this
# way to account for both. If present, the prefix will be ignored
# and not count towards the result.
wsgi_prefix = r'\[[\w :\.]+\].+\]\s+'
# keystone logs contain the (module_name): at the beginning of the
# line.
keystone_prefix = r'\(\S+\):\s+'
prefix_match = r'(?:{}|{})?'.format(wsgi_prefix, keystone_prefix)
# Sometimes the exception is printed with just the class name
# and sometimes it is printed with a full import path e.g.
# MyExc or somemod.MyExc so we need to account for both.
exc_obj_full_path_match = r'(?:\S+\.)?'
expr_template = (r"^{}([0-9\-]+) (\S+) .+\S+\s({}{{}})[\s:\.]".
format(prefix_match, exc_obj_full_path_match))
for agent, log_path in project.log_paths:
log_path = os.path.join(constants.DATA_ROOT, log_path)
if constants.USE_ALL_LOGS:
log_path = "{}*".format(log_path)
self._add_agent_searches(project, agent, log_path,
expr_template)
def get_exceptions_results(self, results):
""" Process exception search results.
Determine frequency of occurrences. By default they are
grouped/presented by date but can optionally be grouped by time for
more granularity.
"""
agent_exceptions = {}
for result in results:
# strip leading/trailing quotes
exc_tag = result.get(3).strip("'")
if exc_tag not in agent_exceptions:
agent_exceptions[exc_tag] = {}
ts_date = result.get(1)
if AGENT_ERROR_KEY_BY_TIME:
# use hours and minutes only
ts_time = re.compile(r'(\d+:\d+).+').search(result.get(2))[1]
key = "{}_{}".format(ts_date, ts_time)
else:
key = str(ts_date)
if key not in agent_exceptions[exc_tag]:
agent_exceptions[exc_tag][key] = 0
agent_exceptions[exc_tag][key] += 1
if not agent_exceptions:
return
for exc_type in agent_exceptions:
agent_exceptions_sorted = {}
for k, v in sorted(agent_exceptions[exc_type].items(),
key=lambda x: x[0]):
agent_exceptions_sorted[k] = v
agent_exceptions[exc_type] = agent_exceptions_sorted
return agent_exceptions
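    # Illustrative shape of the mapping returned above (hypothetical values):
    #   {'MessagingTimeout': {'2021-09-25': 3, '2021-09-26': 1}}
    # or, when AGENT_ERROR_KEY_BY_TIME is set, keys such as '2021-09-25_12:01'.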
def run(self, results):
"""Process search results to see if we got any hits."""
issues = {}
for name, info in self.ost_projects.all.items():
for agent in info.daemon_names:
_results = results.find_by_tag(agent)
ret = self.get_exceptions_results(_results)
if ret:
if name not in issues:
issues[name] = {}
issues[name][agent] = ret
if issues:
self._output['agent-exceptions'] = issues
| 39.432432 | 78 | 0.546607 |
f31c87e666a7efcc14ebd7d1895c70c28831e19c
| 1,210 |
py
|
Python
|
example_3.py
|
z86961027/pycatia
|
5dd9b7eb5d21f2261198d6a2af489abafb2f5f32
|
[
"MIT"
] | null | null | null |
example_3.py
|
z86961027/pycatia
|
5dd9b7eb5d21f2261198d6a2af489abafb2f5f32
|
[
"MIT"
] | null | null | null |
example_3.py
|
z86961027/pycatia
|
5dd9b7eb5d21f2261198d6a2af489abafb2f5f32
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3.6
"""
Example 3:
Find all points in the CATPart and print to console -> and export to csv.
"""
import csv
from pycatia import CATIAApplication
from pycatia import CATIAMeasurable
from pycatia import create_measurable
from pycatia import create_spa_workbench
catia = CATIAApplication()
documents = catia.documents()
documents.open(r'tests\CF_catia_measurable_part.CATPart')
document = catia.document()
spa_workbench = create_spa_workbench(document.document)
part = document.part()
selected = document.search_for_items(document, ['Point'])
# export the points to a csv file.
csv_file_name = '__junk__\\exported_points.csv'
with open(csv_file_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',')
for selection in selected:
reference = part.create_reference(selection)
selection_measurable = create_measurable(spa_workbench, reference)
measurable = CATIAMeasurable(selection_measurable)
# print to console.
print(selection.Name, measurable.get_point(catia))
point_name = selection.Name
x, y, z = measurable.get_point(catia)
csv_writer.writerow([point_name, x, y, z])
| 26.304348 | 77 | 0.733884 |
82854899d2370738e644db4c11407e44676546de
| 3,118 |
py
|
Python
|
data/p2DJ/New/program/qiskit/class/startQiskit_Class158.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class158.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/class/startQiskit_Class158.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=11
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=6
prog.cz(input_qubit[0],input_qubit[1]) # number=7
prog.h(input_qubit[1]) # number=8
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
prog.y(input_qubit[1]) # number=2
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=3
prog.x(input_qubit[1]) # number=9
prog.x(input_qubit[1]) # number=10
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
prog = circuit1
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
writefile = open("../data/startQiskit_Class158.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.09009 | 80 | 0.618345 |
57a0e15c4d3fd56a25836efeaae7fd39ea617ef6
| 13,693 |
py
|
Python
|
code/venv/lib/python3.6/site-packages/sqlparse/lexer.py
|
jhkuang11/UniTrade
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
[
"MIT"
] | null | null | null |
code/venv/lib/python3.6/site-packages/sqlparse/lexer.py
|
jhkuang11/UniTrade
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
[
"MIT"
] | 10 |
2020-06-05T19:42:26.000Z
|
2022-03-11T23:38:35.000Z
|
code/venv/lib/python3.6/site-packages/sqlparse/lexer.py
|
jhkuang11/UniTrade
|
5f68b853926e167936b58c8543b8f95ebd6f5211
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Andi Albrecht, [email protected]
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL Lexer"""
# This code is based on the SqlLexer in pygments.
# http://pygments.org/
# It's separated from the rest of pygments to increase performance
# and to allow some customizations.
import re
import sys
from sqlparse import tokens
from sqlparse.keywords import KEYWORDS, KEYWORDS_COMMON
from io import StringIO
class include(str):
pass
class combined(tuple):
"""Indicates a state combined from multiple states."""
def __new__(cls, *args):
return tuple.__new__(cls, args)
def __init__(self, *args):
# tuple.__init__ doesn't do anything
pass
def is_keyword(value):
test = value.upper()
return KEYWORDS_COMMON.get(test, KEYWORDS.get(test, tokens.Name)), value
def apply_filters(stream, filters, lexer=None):
"""
Use this method to apply an iterable of filters to
a stream. If lexer is given it's forwarded to the
filter, otherwise the filter receives `None`.
"""
def _apply(filter_, stream):
for token in filter_.filter(lexer, stream):
yield token
for filter_ in filters:
stream = _apply(filter_, stream)
return stream
class LexerMeta(type):
"""
Metaclass for Lexer, creates the self._tokens attribute from
self.tokens on the first instantiation.
"""
def _process_state(cls, unprocessed, processed, state):
assert type(state) is str, "wrong state name %r" % state
assert state[0] != '#', "invalid state name %r" % state
if state in processed:
return processed[state]
tokenlist = processed[state] = []
rflags = cls.flags
for tdef in unprocessed[state]:
if isinstance(tdef, include):
# it's a state reference
assert tdef != state, "circular state reference %r" % state
tokenlist.extend(cls._process_state(
unprocessed, processed, str(tdef)))
continue
assert type(tdef) is tuple, "wrong rule def %r" % tdef
try:
rex = re.compile(tdef[0], rflags).match
except Exception as err:
raise ValueError(("uncompilable regex %r in state"
" %r of %r: %s"
% (tdef[0], state, cls, err)))
assert type(tdef[1]) is tokens._TokenType or callable(tdef[1]), \
('token type must be simple type or callable, not %r'
% (tdef[1],))
if len(tdef) == 2:
new_state = None
else:
tdef2 = tdef[2]
if isinstance(tdef2, str):
# an existing state
if tdef2 == '#pop':
new_state = -1
elif tdef2 in unprocessed:
new_state = (tdef2,)
elif tdef2 == '#push':
new_state = tdef2
elif tdef2[:5] == '#pop:':
new_state = -int(tdef2[5:])
else:
assert False, 'unknown new state %r' % tdef2
elif isinstance(tdef2, combined):
# combine a new state from existing ones
new_state = '_tmp_%d' % cls._tmpname
cls._tmpname += 1
itokens = []
for istate in tdef2:
assert istate != state, \
'circular state ref %r' % istate
itokens.extend(cls._process_state(unprocessed,
processed, istate))
processed[new_state] = itokens
new_state = (new_state,)
elif isinstance(tdef2, tuple):
# push more than one state
for state in tdef2:
assert (state in unprocessed or
state in ('#pop', '#push')), \
'unknown new state ' + state
new_state = tdef2
else:
assert False, 'unknown new state def %r' % tdef2
tokenlist.append((rex, tdef[1], new_state))
return tokenlist
def process_tokendef(cls):
cls._all_tokens = {}
cls._tmpname = 0
processed = cls._all_tokens[cls.__name__] = {}
#tokendefs = tokendefs or cls.tokens[name]
for state in list(cls.tokens.keys()):
cls._process_state(cls.tokens, processed, state)
return processed
def __call__(cls, *args, **kwds):
if not hasattr(cls, '_tokens'):
cls._all_tokens = {}
cls._tmpname = 0
if hasattr(cls, 'token_variants') and cls.token_variants:
# don't process yet
pass
else:
cls._tokens = cls.process_tokendef()
return type.__call__(cls, *args, **kwds)
class Lexer(object, metaclass=LexerMeta):
encoding = 'utf-8'
stripall = False
stripnl = False
tabsize = 0
flags = re.IGNORECASE | re.UNICODE
tokens = {
'root': [
(r'(--|# ).*?(\r\n|\r|\n)', tokens.Comment.Single),
# $ matches *before* newline, therefore we have two patterns
# to match Comment.Single
(r'(--|# ).*?$', tokens.Comment.Single),
(r'(\r\n|\r|\n)', tokens.Newline),
(r'\s+', tokens.Whitespace),
(r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
(r':=', tokens.Assignment),
(r'::', tokens.Punctuation),
(r'[*]', tokens.Wildcard),
(r'CASE\b', tokens.Keyword), # extended CASE(foo)
(r"`(``|[^`])*`", tokens.Name),
(r"´(´´|[^´])*´", tokens.Name),
(r'\$([^\W\d]\w*)?\$', tokens.Name.Builtin),
(r'\?{1}', tokens.Name.Placeholder),
(r'%\(\w+\)s', tokens.Name.Placeholder),
(r'%s', tokens.Name.Placeholder),
(r'[$:?]\w+', tokens.Name.Placeholder),
# FIXME(andi): VALUES shouldn't be listed here
# see https://github.com/andialbrecht/sqlparse/pull/64
(r'VALUES', tokens.Keyword),
(r'(@|##|#)[^\W\d_]\w+', tokens.Name),
# IN is special, it may be followed by a parenthesis, but
            # is never a function, see issue183
(r'in\b(?=[ (])?', tokens.Keyword),
(r'[^\W\d_]\w*(?=[.(])', tokens.Name), # see issue39
(r'[-]?0x[0-9a-fA-F]+', tokens.Number.Hexadecimal),
(r'[-]?[0-9]*(\.[0-9]+)?[eE][-]?[0-9]+', tokens.Number.Float),
(r'[-]?[0-9]*\.[0-9]+', tokens.Number.Float),
(r'[-]?[0-9]+', tokens.Number.Integer),
(r"'(''|\\\\|\\'|[^'])*'", tokens.String.Single),
# not a real string literal in ANSI SQL:
(r'(""|".*?[^\\]")', tokens.String.Symbol),
# sqlite names can be escaped with [square brackets]. left bracket
# cannot be preceded by word character or a right bracket --
# otherwise it's probably an array index
(r'(?<![\w\])])(\[[^\]]+\])', tokens.Name),
(r'((LEFT\s+|RIGHT\s+|FULL\s+)?(INNER\s+|OUTER\s+|STRAIGHT\s+)?|(CROSS\s+|NATURAL\s+)?)?JOIN\b', tokens.Keyword),
(r'END(\s+IF|\s+LOOP)?\b', tokens.Keyword),
(r'NOT NULL\b', tokens.Keyword),
(r'CREATE(\s+OR\s+REPLACE)?\b', tokens.Keyword.DDL),
(r'DOUBLE\s+PRECISION\b', tokens.Name.Builtin),
(r'(?<=\.)[^\W\d_]\w*', tokens.Name),
(r'[^\W\d]\w*', is_keyword),
(r'[;:()\[\],\.]', tokens.Punctuation),
(r'[<>=~!]+', tokens.Operator.Comparison),
(r'[+/@#%^&|`?^-]+', tokens.Operator),
],
'multiline-comments': [
(r'/\*', tokens.Comment.Multiline, 'multiline-comments'),
(r'\*/', tokens.Comment.Multiline, '#pop'),
(r'[^/\*]+', tokens.Comment.Multiline),
(r'[/*]', tokens.Comment.Multiline),
]}
def __init__(self):
self.filters = []
def add_filter(self, filter_, **options):
from sqlparse.filters import Filter
if not isinstance(filter_, Filter):
filter_ = filter_(**options)
self.filters.append(filter_)
def _decode(self, text):
if sys.version_info[0] == 3:
if isinstance(text, str):
return text
if self.encoding == 'guess':
try:
text = text.decode('utf-8')
if text.startswith('\ufeff'):
text = text[len('\ufeff'):]
except UnicodeDecodeError:
text = text.decode('latin1')
else:
try:
text = text.decode(self.encoding)
except UnicodeDecodeError:
text = text.decode('unicode-escape')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
return text
def get_tokens(self, text, unfiltered=False):
"""
Return an iterable of (tokentype, value) pairs generated from
`text`. If `unfiltered` is set to `True`, the filtering mechanism
is bypassed even if filters are defined.
        Also preprocess the text, i.e. expand tabs and strip it if
        wanted, and apply registered filters.
"""
if isinstance(text, str):
if self.stripall:
text = text.strip()
elif self.stripnl:
text = text.strip('\n')
if sys.version_info[0] < 3 and isinstance(text, str):
text = StringIO(text.encode('utf-8'))
self.encoding = 'utf-8'
else:
text = StringIO(text)
def streamer():
for i, t, v in self.get_tokens_unprocessed(text):
yield t, v
stream = streamer()
if not unfiltered:
stream = apply_filters(stream, self.filters, self)
return stream
def get_tokens_unprocessed(self, stream, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the initial stack (default: ``['root']``)
"""
pos = 0
tokendefs = self._tokens # see __call__, pylint:disable=E1101
statestack = list(stack)
statetokens = tokendefs[statestack[-1]]
known_names = {}
text = stream.read()
text = self._decode(text)
while 1:
for rexmatch, action, new_state in statetokens:
m = rexmatch(text, pos)
if m:
value = m.group()
if value in known_names:
yield pos, known_names[value], value
elif type(action) is tokens._TokenType:
yield pos, action, value
elif hasattr(action, '__call__'):
ttype, value = action(value)
known_names[value] = ttype
yield pos, ttype, value
else:
for item in action(self, m):
yield item
pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
statestack.pop()
elif state == '#push':
statestack.append(statestack[-1])
elif (
# Ugly hack - multiline-comments
# are not stackable
state != 'multiline-comments'
or not statestack
or statestack[-1] != 'multiline-comments'
):
statestack.append(state)
elif isinstance(new_state, int):
# pop
del statestack[new_state:]
elif new_state == '#push':
statestack.append(statestack[-1])
else:
assert False, "wrong state def: %r" % new_state
statetokens = tokendefs[statestack[-1]]
break
else:
try:
if text[pos] == '\n':
# at EOL, reset state to "root"
pos += 1
statestack = ['root']
statetokens = tokendefs['root']
yield pos, tokens.Text, '\n'
continue
yield pos, tokens.Error, text[pos]
pos += 1
except IndexError:
break
def tokenize(sql, encoding=None):
"""Tokenize sql.
Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
of ``(token type, value)`` items.
"""
lexer = Lexer()
if encoding is not None:
lexer.encoding = encoding
return lexer.get_tokens(sql)
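# A minimal usage sketch (not part of the original module): print the
# (token type, value) pairs produced by tokenize() for a short, purely
# illustrative SQL statement.
if __name__ == '__main__':
    for ttype, value in tokenize("SELECT a, b FROM t WHERE a > 1;"):
        print(ttype, repr(value))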
| 37.930748 | 125 | 0.482071 |
9913923cb7807d6ac8130a8c6e2f18e59d2e8a0f
| 11,762 |
py
|
Python
|
pandas/core/dtypes/missing.py
|
cclauss/pandas
|
692b5eeeff9b8e8c750f3e64db0c39dc149a73e8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | 1 |
2021-05-10T10:02:04.000Z
|
2021-05-10T10:02:04.000Z
|
pandas/core/dtypes/missing.py
|
cclauss/pandas
|
692b5eeeff9b8e8c750f3e64db0c39dc149a73e8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
pandas/core/dtypes/missing.py
|
cclauss/pandas
|
692b5eeeff9b8e8c750f3e64db0c39dc149a73e8
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
missing types & inference
"""
import numpy as np
from pandas._libs import lib
from pandas._libs.tslib import NaT, iNaT
from .generic import (ABCMultiIndex, ABCSeries,
ABCIndexClass, ABCGeneric)
from .common import (is_string_dtype, is_datetimelike,
is_datetimelike_v_numeric, is_float_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_interval_dtype,
is_complex_dtype, is_categorical_dtype,
is_string_like_dtype, is_bool_dtype,
is_integer_dtype, is_dtype_equal,
needs_i8_conversion, _ensure_object,
pandas_dtype,
is_scalar,
is_object_dtype,
is_integer,
_TD_DTYPE,
_NS_DTYPE)
from .inference import is_list_like
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
"""
return _isnull(obj)
def _isnull_new(obj):
if is_scalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if is_scalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, ABCMultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=_isnull_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isnull = _isnull_new
def _use_inf_as_null(key):
"""Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
from pandas.core.config import get_option
flag = get_option(key)
if flag:
globals()['_isnull'] = _isnull_old
else:
globals()['_isnull'] = _isnull_new
def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
if is_categorical_dtype(values):
from pandas import Categorical
if not isinstance(values, Categorical):
values = values.values
result = values.isnull()
elif is_interval_dtype(values):
from pandas import IntervalIndex
result = IntervalIndex(obj).isnull()
else:
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[...] = vec.reshape(shape)
elif needs_i8_conversion(obj):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if is_string_dtype(dtype):
# Working around NumPy ticket 1542
shape = values.shape
if is_string_like_dtype(dtype):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif is_datetime64_dtype(dtype):
# this is the NaT pattern
result = values.view('i8') == iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notnull(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
arr : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is *not* null or if an array
is given which of the element is *not* null.
See also
--------
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if is_scalar(res):
return not res
return ~res
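# A short illustration (not part of the original module) of the behaviour wrapped by the
# public pandas API, which dispatches to the helpers above:
#   >>> import numpy as np, pandas as pd
#   >>> pd.isnull(np.array([1.0, np.nan]))
#   array([False,  True])
#   >>> pd.notnull(None)
#   False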
def is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is NaT or other is None:
return True
elif is_scalar(other):
# a timedelta
if hasattr(other, 'dtype'):
return other.view('i8') == iNaT
elif is_integer(other) and other == iNaT:
return True
return isnull(other)
return False
def _is_na_compat(arr, fill_value=np.nan):
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isnull(fill_value):
return not (is_bool_dtype(dtype) or
is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan=False):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs
in corresponding locations. False otherwise. It is assumed that left and
right are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
strict_nan : bool, default False
If True, consider NaN and None to be different.
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(
... np.array([1, 2, np.nan]),
... np.array([1, 2, np.nan]))
True
>>> array_equivalent(
... np.array([1, np.nan, 2]),
... np.array([1, 2, np.nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
# shape compat
if left.shape != right.shape:
return False
# Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
if is_string_dtype(left) or is_string_dtype(right):
if not strict_nan:
# isnull considers NaN and None to be equivalent.
return lib.array_equivalent_object(
_ensure_object(left.ravel()), _ensure_object(right.ravel()))
for left_value, right_value in zip(left, right):
if left_value is NaT and right_value is not NaT:
return False
elif isinstance(left_value, float) and np.isnan(left_value):
if (not isinstance(right_value, float) or
not np.isnan(right_value)):
return False
else:
if left_value != right_value:
return False
return True
# NaNs can occur in float and complex arrays.
if is_float_dtype(left) or is_complex_dtype(left):
return ((left == right) | (isnull(left) & isnull(right))).all()
    # numpy will not allow this type of datetimelike vs integer comparison
elif is_datetimelike_v_numeric(left, right):
return False
# M8/m8
elif needs_i8_conversion(left) and needs_i8_conversion(right):
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.view('i8')
right = right.view('i8')
# NaNs cannot occur otherwise.
try:
return np.array_equal(left, right)
except AttributeError:
# see gh-13388
#
# NumPy v1.7.1 has a bug in its array_equal
# function that prevents it from correctly
# comparing two arrays with complex dtypes.
# This bug is corrected in v1.8.0, so remove
# this try-except block as soon as we stop
# supporting NumPy versions < 1.8.0
if not is_dtype_equal(left.dtype, right.dtype):
return False
left = left.tolist()
right = right.tolist()
return left == right
def _infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like if we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if is_datetimelike(val):
return np.array('NaT', dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(_ensure_object(val))
if dtype in ['datetime', 'datetime64']:
return np.array('NaT', dtype=_NS_DTYPE)
elif dtype in ['timedelta', 'timedelta64']:
return np.array('NaT', dtype=_TD_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
"""
    if we have a compatible fill_value and arr dtype, then fill
"""
if _is_na_compat(arr, fill_value):
arr.fill(fill_value)
return arr
def na_value_for_dtype(dtype):
"""
Return a dtype compat na value
Parameters
----------
dtype : string / dtype
Returns
-------
np.dtype or a pandas dtype
"""
dtype = pandas_dtype(dtype)
if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or
is_timedelta64_dtype(dtype)):
return NaT
elif is_float_dtype(dtype):
return np.nan
elif is_integer_dtype(dtype):
return 0
elif is_bool_dtype(dtype):
return False
return np.nan
def remove_na_arraylike(arr):
"""
Return array-like containing only true/non-NaN values, possibly empty.
"""
return arr[notnull(lib.values_from_object(arr))]
| 29.113861 | 79 | 0.616052 |
f8bf6d3b5d5a71532178fcda5c0fe8d1c4ea3956
| 1,863 |
py
|
Python
|
puzzleSequenceLogic.py
|
yanamal/html-hax-clean
|
03aefcef767cf982075598af7332b2aa4ef363a7
|
[
"MIT"
] | null | null | null |
puzzleSequenceLogic.py
|
yanamal/html-hax-clean
|
03aefcef767cf982075598af7332b2aa4ef363a7
|
[
"MIT"
] | null | null | null |
puzzleSequenceLogic.py
|
yanamal/html-hax-clean
|
03aefcef767cf982075598af7332b2aa4ef363a7
|
[
"MIT"
] | null | null | null |
import json, logging
from google.appengine.api import users
from flask import Flask
from user import UserProfile
app = Flask(__name__)
# given the name of a puzzle the user has just completed,
# generate a completion message for the user (as a short HTML snippet).
# The message includes a link to the next puzzle they should do, if any.
def userCompletedPuzzle(puzzle):
# get the user's profile, and record that they've solved this puzzle:
profile = UserProfile.get_by_user(users.get_current_user())
profile.solved_puzzles.append(puzzle)
message = 'correct! ' # start composing the message displayed to the user.
nextPuzzle = getNextPuzzle(puzzle) # use the current puzzle's path to get the puzzle that should be next.
if nextPuzzle:
# if there is a next puzzle, then link to it
        message += '<a href="' + nextPuzzle + '">Next puzzle!</a>'
# also, change the user's current puzzle to nextPuzzle:
profile.current_puzzle = nextPuzzle
else:
# if there is not a next puzzle, tell the user they are all done
message += 'All done!'
profile.put() # commit all the changes we've made to the user profile
return message
# given the name of the current puzzle,
# decide what the next puzzle should be.
def getNextPuzzle(curr):
with app.open_resource('data/puzzleSequence.json') as f:
puzzles = json.load(f)
nextp = puzzles[0] # default value: if we can't figure out the proper next puzzle, we'll just return the first one.
if curr and (curr in puzzles):
i = puzzles.index(curr) # This isn't very efficient, but anything nicer would require a more complex puzzleSequence data structure.
if (i+1) >= len(puzzles):
return None # if this was the last puzzle, you're done!
nextp = puzzles[i+1]
return nextp
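# Assumed layout of data/puzzleSequence.json (the file itself is not shown here): a flat
# JSON list of puzzle page names in play order, e.g. ["intro", "tables", "forms"], so
# getNextPuzzle() returns the entry after the current one, or None after the last puzzle.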
| 43.325581 | 143 | 0.690284 |
8edb2826354210c52545291b0af389d6dc66fa0b
| 55 |
py
|
Python
|
mechmat/properties/thermal/__init__.py
|
mecheng/mechmat
|
2c3bc43dce85d4827450a8ad69311ca49bb0d035
|
[
"MIT"
] | 1 |
2019-05-13T09:19:13.000Z
|
2019-05-13T09:19:13.000Z
|
mechmat/properties/thermal/__init__.py
|
mecheng/mechmat
|
2c3bc43dce85d4827450a8ad69311ca49bb0d035
|
[
"MIT"
] | 25 |
2019-05-24T18:59:38.000Z
|
2021-06-01T23:44:25.000Z
|
mechmat/properties/thermal/__init__.py
|
jellespijker/mechmat
|
2c3bc43dce85d4827450a8ad69311ca49bb0d035
|
[
"MIT"
] | 1 |
2020-09-06T12:38:08.000Z
|
2020-09-06T12:38:08.000Z
|
from mechmat.properties.thermal.thermal import Thermal
| 27.5 | 54 | 0.872727 |
9605e1919ab5c1b5ff33b3e313ed150f5a117178
| 2,574 |
py
|
Python
|
scripts/changelog.py
|
FrNecas/deployment
|
59a95f2b6b94014b5f4a17a2af47f92c8c79d5f7
|
[
"MIT"
] | null | null | null |
scripts/changelog.py
|
FrNecas/deployment
|
59a95f2b6b94014b5f4a17a2af47f92c8c79d5f7
|
[
"MIT"
] | null | null | null |
scripts/changelog.py
|
FrNecas/deployment
|
59a95f2b6b94014b5f4a17a2af47f92c8c79d5f7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import re
from typing import List, Optional
import click
from git import Commit, Repo
NOT_IMPORTANT_VALUES = ["n/a", "none", "none.", ""]
RELEASE_NOTES_TAG = "RELEASE NOTES"
RELEASE_NOTES_RE = f"{RELEASE_NOTES_TAG} BEGIN(.+){RELEASE_NOTES_TAG} END"
def get_relevant_commits(repository: Repo, ref: str) -> List[Commit]:
range = f"{ref}..HEAD"
return list(repository.iter_commits(rev=range, merges=True))
def get_pr_data(message: str) -> str:
"""
obtain PR ID and produce a markdown link to it
"""
# Merge pull request #1483 from majamassarini/fix/1357
first_line = message.split("\n")[0]
fourth_word = first_line.split(" ")[3]
return fourth_word
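# Illustrative sketch: for the commit subject quoted in the comment above,
#   get_pr_data("Merge pull request #1483 from majamassarini/fix/1357") -> "#1483"
# i.e. the fourth whitespace-separated word of the first line.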
def convert_message(message: str) -> Optional[str]:
"""Extract release note from the commit message,
return None if there is no release note"""
if RELEASE_NOTES_TAG in message:
# new
if match := re.findall(RELEASE_NOTES_RE, message):
return match[0]
else:
return None
else:
# old
cleared_message = message.split("Reviewed-by")[0].strip()
release_note = cleared_message.split("\n\n")[-1].strip()
if "Signed-off-by" in release_note:
# empty release note
return None
return release_note
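# A hedged sketch of convert_message on made-up commit messages:
#   "... RELEASE NOTES BEGIN Fix crash on empty config RELEASE NOTES END ..."
#       -> " Fix crash on empty config "   (the text captured between the tags)
#   old-style message -> the last paragraph before "Reviewed-by" is returned,
#       or None when that paragraph is only a "Signed-off-by" line.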
def get_changelog(commits: List[Commit]) -> str:
changelog = ""
for commit in commits:
message = convert_message(commit.message)
        if message and message.lower() not in NOT_IMPORTANT_VALUES:
suffix = get_pr_data(commit.message)
changelog += f"- {message} ({suffix})\n"
return changelog
@click.command(
short_help="Get the changelog from the merge commits",
help="""Get the changelog from the merge commits
The script goes through the merge commits since the specified REF
and get the changelog entry from the commit message.
By now, we parse a last paragraph of the pull-request description
(that is contained in the commit message).
In the future, we will have an explicit divider.
""",
)
@click.option(
"--git-dir",
default=".",
type=click.Path(dir_okay=True, file_okay=False),
help="Git repository used for getting the changelog. "
"Current directory is used by default.",
)
@click.argument("ref", type=click.STRING)
def changelog(git_dir, ref):
print(get_changelog(get_relevant_commits(Repo(git_dir), ref)))
if __name__ == "__main__":
changelog()
| 29.930233 | 74 | 0.67094 |
a062f7d901df1c14266fcdfa4f15e8371916cab1
| 4,490 |
py
|
Python
|
parl_practice/PyGame/Snake/DQN/train.py
|
fluffyrita/ReinforcementLearning
|
8ec3394dd884b7ba742856129b7923d77e38573a
|
[
"MIT"
] | 1 |
2021-08-20T03:38:28.000Z
|
2021-08-20T03:38:28.000Z
|
parl_practice/PyGame/Snake/DQN/train.py
|
fluffyrita/ReinforcementLearning
|
8ec3394dd884b7ba742856129b7923d77e38573a
|
[
"MIT"
] | 1 |
2021-03-11T16:31:35.000Z
|
2021-03-11T16:31:35.000Z
|
parl_practice/PyGame/Snake/DQN/train.py
|
fluffyrita/ReinforcementLearning
|
8ec3394dd884b7ba742856129b7923d77e38573a
|
[
"MIT"
] | 1 |
2021-03-11T14:01:43.000Z
|
2021-03-11T14:01:43.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-*- coding: utf-8 -*-
from ple.games.snake import Snake
from ple import PLE
import parl
from parl import layers
import paddle.fluid as fluid
import numpy as np
import os
from parl.utils import logger
from parl.algorithms import DQN
import random
import collections
from parl.utils import logger  # logging utility
from model import Model
from algorithm import DQN # from parl.algorithms import DQN # parl >= 1.3.1
from agent import Agent
from replay_memory import ReplayMemory
from utils import get_obs
LEARN_FREQ = 5  # learning frequency: do not learn at every step; accumulate some new experience first, which is more efficient
MEMORY_SIZE = 20000  # size of the replay memory; the larger it is, the more memory it uses
MEMORY_WARMUP_SIZE = 200  # pre-store some experience in the replay memory before sampling batches for the agent to learn from
BATCH_SIZE = 32  # number of samples per learning step, drawn at random from the replay memory
LEARNING_RATE = 0.001  # learning rate
GAMMA = 0.99  # reward discount factor, typically between 0.9 and 0.999
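# Rough schedule implied by the constants above: learning only starts once the replay
# memory holds more than MEMORY_WARMUP_SIZE (200) transitions, and then one update is
# performed every LEARN_FREQ (5) environment steps on a random BATCH_SIZE (32) sample.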
# train one episode
def run_episode(env, agent, rpm):
total_reward = 0
env.reset_game()
obs = get_obs(env)
step = 0
while True:
step += 1
        action_index = agent.sample(obs)  # sample an action; every action has some probability of being tried
        action = env.getActionSet()[action_index]
        # take the action
reward = env.act(action)
next_obs = get_obs(env)
done = env.game_over()
rpm.append((obs, action_index, reward, next_obs, done))
# train model
if (len(rpm) > MEMORY_WARMUP_SIZE) and (step % LEARN_FREQ == 0):
(batch_obs, batch_action, batch_reward, batch_next_obs,
batch_done) = rpm.sample(BATCH_SIZE)
train_loss = agent.learn(batch_obs, batch_action, batch_reward,
batch_next_obs,
batch_done) # s,a,r,s',done
total_reward += reward
obs = next_obs
if done:
break
return total_reward
# evaluate the agent: run 5 episodes and average the total reward
def evaluate(env, agent, render=False):
eval_reward = []
for i in range(5):
env.reset_game()
obs = get_obs(env)
episode_reward = 0
while True:
            action_index = agent.predict(obs)  # pick the best action
action = env.getActionSet()[action_index]
reward = env.act(action)
obs = get_obs(env)
episode_reward += reward
if render:
env.getScreenRGB()
if env.game_over():
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def main():
    # create the environment
game = Snake(width=200, height=200, init_length=5)
p = PLE(game, fps=30, display_screen=False, force_fps=True)
    # build the agent with the PARL framework
print(p.getActionSet())
act_dim = len(p.getActionSet())
obs_dim = 200*200
    rpm = ReplayMemory(MEMORY_SIZE)  # experience replay buffer for DQN
model = Model(act_dim=act_dim)
alg = DQN(model, act_dim=act_dim, gamma=GAMMA, lr=LEARNING_RATE)
    agent = Agent(alg, obs_dim=obs_dim, act_dim=act_dim, e_greed_decrement=1e-6, e_greed=0.2)  # e_greed: probability of picking a random action, for exploration
    # load a saved model
# if os.path.exists('./dqn_snake_400.ckpt'):
# agent.restore('./dqn_snake_400.ckpt')
    # pre-fill the replay memory so that early training has enough sample diversity
while len(rpm) < MEMORY_WARMUP_SIZE:
run_episode(p, agent, rpm)
max_episode = 2000000
    # start training
episode = 0
best_reward = -float('inf')
    while episode < max_episode:  # train for max_episode episodes; test episodes are not counted
# train part
for i in range(0, 100):
total_reward = run_episode(p, agent, rpm)
episode += 1
# test part
        eval_reward = evaluate(p, agent, render=True)  # render=True to watch the game
if eval_reward>best_reward:
best_reward = eval_reward
agent.save('model_dir/dqn_snake_{}.ckpt'.format(episode))
logger.info('episode:{} e_greed:{} test_reward:{}'.format(
episode, agent.e_greed, eval_reward))
if __name__ == '__main__':
main()
| 31.843972 | 118 | 0.653229 |
99176030bd394092342caa48aa766d4fb840cd2d
| 951 |
py
|
Python
|
keyboard-termux.py
|
fossabot/currency-converter
|
5b82106758dcc3c3184524e84b1cf449fb0a6d86
|
[
"MIT"
] | 1 |
2020-04-19T11:25:06.000Z
|
2020-04-19T11:25:06.000Z
|
keyboard-termux.py
|
fossabot/currency-converter
|
5b82106758dcc3c3184524e84b1cf449fb0a6d86
|
[
"MIT"
] | 1 |
2020-04-19T04:39:11.000Z
|
2020-04-19T04:39:11.000Z
|
keyboard-termux.py
|
fossabot/currency-converter
|
5b82106758dcc3c3184524e84b1cf449fb0a6d86
|
[
"MIT"
] | 5 |
2020-02-21T08:36:44.000Z
|
2020-04-19T04:35:56.000Z
|
import os
from time import sleep
a ='\033[92m'
b ='\033[91m'
c ='\033[0m'
os.system('clear')
print(a+'\t Advanced Shortcut Termux ')
print(b+'\t Dimas Lanjaka')
print('\t https://www.webmanajemen.com')
print('\t Facebook : https://fb.me/dimaslanjaka1')
print('\t https://github.com/dimaslanjaka')
print(a+'+'*40)
print('\n Process..')
sleep(1)
print(b+'\n[!] Getting default termux settings')
sleep(1)
try:
os.mkdir('/data/data/com.termux/files/home/.termux')
except:
pass
print(a+'[!]Success !')
sleep(1)
print(b+'\n[!] Adding files..')
sleep(1)
key = 'extra-keys = [["ESC", "/", "-", "HOME", "UP", "END", "PGUP"], ["TAB", "CTRL", "ALT", "LEFT", "DOWN", "RIGHT", "PGDN"]]'
Control = open('/data/data/com.termux/files/home/.termux/termux.properties','w')
Control.write(key)
Control.close()
sleep(1)
print(a+'[!] Processing !')
sleep(1)
print(b+'\n[!] Please wait...')
sleep(2)
os.system('termux-reload-settings')
print(a+'[!] Success')
| 25.026316 | 126 | 0.633018 |
178d054d8accffbaf02aa4d05de42ef23aba7fd0
| 2,484 |
py
|
Python
|
wrapper.py
|
sevmardi/LunarLander
|
154902829da0d559c0720b94fd38c09d9d0713b5
|
[
"MIT"
] | null | null | null |
wrapper.py
|
sevmardi/LunarLander
|
154902829da0d559c0720b94fd38c09d9d0713b5
|
[
"MIT"
] | null | null | null |
wrapper.py
|
sevmardi/LunarLander
|
154902829da0d559c0720b94fd38c09d9d0713b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Environment wrapper class for Reinforcement Learning PA - Spring 2018
Details:
File name: wrapper.py
Author: Anna Latour
Date created: 19 March 2018
Date last modified: 19 March 2018
Python Version: 3.4
Description:
Implementation of a superclass for environment wrappers. A wrapper allows
you to model the environment you need to learn by e.g. providing functions
that aid discretisation.
Related files:
main.py
cartpole_wrapper.py
"""
import gym
class Wrapper(object):
""" Wrapper: A Supper class for an environment helps you to specify how you model the
environment such that it can interface with a general Reinforcement Learning
agent."""
def __init__(self, env_name, actions):
self._env = gym.make(env_name)
self._actions = actions
self._number_of_steps = 0
def reset(self):
self._number_of_steps = 0
return self._env.reset()
def action_space(self):
return self._env.action_space
def observation_space(self):
return self._env.observation_space
def step(self, action):
self._number_of_steps += 1
return self._env.step(action)
def close(self):
self._env.close()
def actions(self):
if self._actions is None:
raise NotImplementedError("Subclass must define actions")
return self._actions
def solved(self, *args, **kwargs):
raise NotImplementedError("Subclass must implement abstract method")
class LunarLanderWrapper(Wrapper):
""" TODO: Add a description for your wrapper
"""
_actions = [] # TODO: define list of actions (HINT: check LunarLander-v2 source code to figure out what those actions are)
_penalty = []
    def __init__(self):
        self._actions = [0, 1, 2]  # left (0), right (1), bottom (2)
        self._penalty = 0
        super().__init__(env_name='LunarLander-v2', actions=self._actions)  # Don't change environment name
def solved(self, rewards):
if (len(rewards) >= 100) and (sum(1 for r in rewards if r >= 200) >= 10):
return True
return False
def episode_over(self):
        # Should return True once the lander has landed, crashed or gone out of frame.
        pass
def penalty(self):
return self._penalty
# TODO: implement all other functions and methods needed for your wrapper
| 28.551724 | 128 | 0.648953 |
a9053e8a421437e81c3825d5444c790d86395035
| 488 |
py
|
Python
|
processing_scripts/icon_generator/to_icons.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 24 |
2019-02-02T20:37:53.000Z
|
2022-02-09T13:51:41.000Z
|
processing_scripts/icon_generator/to_icons.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 671 |
2018-08-20T08:46:35.000Z
|
2022-03-26T00:11:43.000Z
|
processing_scripts/icon_generator/to_icons.py
|
CorentG/Pokecube-Issues-and-Wiki
|
690af5d8499561f65f761fd49fbf5fc2bc85c4c3
|
[
"MIT"
] | 68 |
2018-09-25T21:03:40.000Z
|
2022-02-25T19:59:51.000Z
|
from PIL import Image
import PIL
import os
from glob import glob
imgs = [y for x in os.walk(".") for y in glob(os.path.join(x[0], '*.png'))]
size = 32, 32
lastdir = None
for file in imgs:
img = Image.open(file)
img = img.resize(size,resample=PIL.Image.HAMMING)
file = file.replace('img', 'icon', 1)
dir = os.path.dirname(file)
try:
os.makedirs(dir)
except:
pass
if dir!=lastdir:
print(dir)
lastdir = dir
img.save(file, "png")
| 21.217391 | 75 | 0.604508 |
9803bb9cc17e555df28cbe75b2393cf01e5c0bdf
| 2,264 |
py
|
Python
|
distanceFinder.py
|
Deoy12/Fun_Projects
|
46355da4f4d98a3bfe7bc423291ea862c6f0e48f
|
[
"MIT"
] | null | null | null |
distanceFinder.py
|
Deoy12/Fun_Projects
|
46355da4f4d98a3bfe7bc423291ea862c6f0e48f
|
[
"MIT"
] | null | null | null |
distanceFinder.py
|
Deoy12/Fun_Projects
|
46355da4f4d98a3bfe7bc423291ea862c6f0e48f
|
[
"MIT"
] | null | null | null |
import requests
import json
from bs4 import BeautifulSoup
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
headers = {
'user-agent': user_agent,
}
source = requests.get('https://www.zillow.com/berkeley-ca/apartments/', headers = headers).text
soup = BeautifulSoup(source, 'lxml')
listings = soup.find_all('article', class_= 'list-card list-card-short list-card_not-saved list-card_building')
def filter_listings(listings, limit = 3000):
addresses = []
prices = []
listings = [listing.text.split("|") for listing in listings]
for i in range(len(listings)):
listing = listings[i]
if len(listings[i]) != 1:
listing = listing[1]
else:
listing = listing[0]
split_listing = listing.split("$")
address = split_listing[0][:split_listing[0].find('CA') + 2].strip().replace(" ", '+')
price = split_listing[1].split(" ")[0].strip('+').replace(",","")
price = int(price)
if price <= limit:
addresses.append(address)
prices.append(price)
return addresses, prices
def find_distances(addresses, limit = 20):
pairs = {}
for address1 in addresses:
for address2 in addresses:
if address1 != address2 and (address1, address2) not in pairs and (address2, address1) not in pairs:
origin = address1
destination = address2
mode = 'walking'
response = requests.request('GET',f'https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={origin},DC&destinations={destination}&mode={mode}&key=#mykey')
response = response.content
response = json.loads(response)
time = response['rows'][0]['elements'][0]['duration']['text']
if time.find('hour') == -1:
time = time.strip('mins')
time = int(time)
if time < limit:
address_pair = (address1, address2)
pairs[address_pair] = time
return pairs
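# Rough shape of the Distance Matrix response this code relies on (abridged sketch,
# field names as in the public API docs, values made up):
#   {"rows": [{"elements": [{"duration": {"text": "12 mins"}, ...}]}], ...}
# so response['rows'][0]['elements'][0]['duration']['text'] yields e.g. "12 mins",
# which the loop above strips down to an integer number of minutes.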
addresses, prices = filter_listings(listings)
print(find_distances(addresses, 5))
| 45.28 | 195 | 0.599382 |
b6c7946862178bf722ff6dd24b0703f8449118f6
| 2,037 |
py
|
Python
|
data_raw/wikidata/wd_ciic_edit.py
|
FellowsFreiesWissen/-
|
3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3
|
[
"MIT"
] | 1 |
2021-02-08T20:33:41.000Z
|
2021-02-08T20:33:41.000Z
|
data_raw/wikidata/wd_ciic_edit.py
|
FellowsFreiesWissen/-
|
3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3
|
[
"MIT"
] | null | null | null |
data_raw/wikidata/wd_ciic_edit.py
|
FellowsFreiesWissen/-
|
3e74e6531985b0e9ba2e34bf1f6a1224cc7b52f3
|
[
"MIT"
] | 1 |
2021-01-01T17:23:40.000Z
|
2021-01-01T17:23:40.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Florian Thiery"
__copyright__ = "MIT Licence 2021, Florian Thiery"
__credits__ = ["Florian Thiery"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Florian Thiery"
__email__ = "[email protected]"
__status__ = "1.0"
__update__ = "2021-05-01"
# import dependencies
import uuid
import requests
import io
import pandas as pd
import os
import codecs
import datetime
import importlib
import sys
import xml.etree.ElementTree as ET
import pandas_read_xml as pdx
import json
import numpy as np
import hashlib
# set UTF8 as default
# -*- coding: utf-8 -*-
importlib.reload(sys)
# read CSV [filename, inscription, CIIC]
dir_path = os.path.dirname(os.path.realpath(__file__))
csv = dir_path + "\\" + "os_mac.csv"
# read csv file
data = pd.read_csv(
csv,
encoding='utf-8',
sep=',',
usecols=['item', 'label', 'desc', 'geo', 'logainm', 'osm']
)
print(data.info())
lines = []
# https://jsoneditoronline.org/#left=local.rixegu&right=local.fipoze
for index, row in data.iterrows():
line = ""
line += "" + str(row['item']) + "\t" + "Len" + "\t" + "\"" + str(row['label']) + "\"" + "\r\n"
line += "" + str(row['item']) + "\t" + "Den" + "\t" + "\"" + str(row['desc']) + "\"" + "\r\n"
line += "" + str(row['item']) + "\t" + "P361" + "\t" + "Q100530634" + "\r\n"
line += "-" + str(row['item']) + "\t" + "P31" + "\t" + "Q2151232" + "\r\n"
line += "-" + str(row['item']) + "\t" + "P31" + "\t" + "Q2221906" + "\r\n"
if str(row['logainm']) != "nan":
line += "-" + str(row['item']) + "\t" + "P5097" + "\t" + "\"" + str(row['logainm']).replace(".0", "") + "\"" + "\r\n"
if str(row['osm']) != "nan":
line += "-" + str(row['item']) + "\t" + "P402" + "\t" + "\"" + str(row['osm']).replace(".0", "") + "\"" + "\r\n"
# add line to output array
lines.append(line)
# write output file
file_out = dir_path + "\\" + "wd_ciic_edit.qs"
file = codecs.open(file_out, "w", "utf-8")
for line in lines:
file.write(line)
file.close()
| 28.690141 | 125 | 0.568483 |
f0cad5002c777e0ad4d70047ba761b65b8028046
| 325 |
py
|
Python
|
Python/namecheck.py
|
BirendraRokaha/Learn2020
|
5da92b1947a266f009f9cd371dd6a03610b95c9b
|
[
"MIT"
] | null | null | null |
Python/namecheck.py
|
BirendraRokaha/Learn2020
|
5da92b1947a266f009f9cd371dd6a03610b95c9b
|
[
"MIT"
] | 11 |
2021-04-08T19:55:39.000Z
|
2022-03-02T09:51:03.000Z
|
Python/namecheck.py
|
BirendraRokaha/Learn2020
|
5da92b1947a266f009f9cd371dd6a03610b95c9b
|
[
"MIT"
] | null | null | null |
# check if the given name is in the list
name_list = ['Ram','Hari','Sita', 'Gita', 'Rita']
a = [i.lower() for i in name_list]
print(a)
b = input("Enter the name to check: ")
c = b.lower()
if c in a:
print(f"Success, the name {c} exists in the list.")
else:
print(f"Sorry, the name {c} doesnot exist on the list.")
| 21.666667 | 60 | 0.624615 |
1edd8bfc43da05339c38a2950ab114530e8280c9
| 8,216 |
py
|
Python
|
modules/model/db_connection.py
|
fmawelle/ediViewer
|
f96269303440595bf6de91852c2ffe4483aa3dcb
|
[
"MIT"
] | null | null | null |
modules/model/db_connection.py
|
fmawelle/ediViewer
|
f96269303440595bf6de91852c2ffe4483aa3dcb
|
[
"MIT"
] | null | null | null |
modules/model/db_connection.py
|
fmawelle/ediViewer
|
f96269303440595bf6de91852c2ffe4483aa3dcb
|
[
"MIT"
] | null | null | null |
import os
import sqlite3
from sqlite3 import Error
from modules.utility import Utility
class Connection:
conn = None
def create_connection(self):
database_absolute_path = ''
connection_successful = False
conn = None
databases_dir = os.path.join(os.getcwd(), 'databases')
try:
if os.path.exists(databases_dir): # dir exists
database_absolute_path = os.path.join(databases_dir, 'data.db')
if os.path.exists(database_absolute_path): # perform sql operations
# print(database_absolute_path + ' exists.')
connection_successful = True
else: # create database and perform sql operations
# print(database_absolute_path + ' does NOT exist.')
print('Creating one')
connection_successful = True
else:
print('The path does not exist')
connection_successful = False
if connection_successful:
self.conn = sqlite3.connect(database_absolute_path)
return self.conn
except Error as e:
Utility.show_error('Database Connection', e) # print(e)
def insert_transaction(self, values):
try:
self.conn = self.create_connection()
qry = '''INSERT INTO TRANSACTION_LIST(FILE_NAME, TRANSACTION_ID, NAME) VALUES(?, ?, ?)'''
cursor = self.conn.cursor()
# for value in values:
cursor.executemany(qry, values)
self.conn.commit()
# print(values)
# self.commit()
except Error as e:
Utility.show_error('Database Error', e)
print(e)
finally:
self.conn.close()
def insert_transaction_details(self, table, transaction_dtls):
''' Acceptable values 0 = header, 1 = details, 2 = trailer '''
if table == 0:
table = 'TRANSACTION_HDR'
elif table == 1:
table = 'TRANSACTION_DTL'
else:
table = 'TRANSACTION_TRL'
try:
self.conn = self.create_connection()
qry = '''INSERT INTO {} (FILE_NAME, TRANSACTION_ID, SEGMENT_ID, SEGMENT) VALUES(?, ?, ?, ?)'''.format(table)
# print(qry)
cursor = self.conn.cursor()
# for value in values:
# qry.format('TRANSACTION_DTL')
cursor.executemany(qry, transaction_dtls)
self.conn.commit()
except Error as e:
Utility.show_error('Database Insert', e)
finally:
self.conn.close()
def delete_old_rows(self, file_name):
try:
self.conn = self.create_connection()
            qry = '''DELETE FROM TRANSACTION_LIST WHERE FILE_NAME = ?'''
            cursor = self.conn.cursor()
            cursor.execute(qry, [file_name])
            qry = '''DELETE FROM TRANSACTION_DTL WHERE FILE_NAME = ?'''
            cursor.execute(qry, [file_name])
            qry = '''DELETE FROM TRANSACTION_TRL WHERE FILE_NAME = ?'''
            cursor.execute(qry, [file_name])
            qry = '''DELETE FROM TRANSACTION_HDR WHERE FILE_NAME = ?'''
            cursor.execute(qry, [file_name])
self.conn.commit()
# print('Successfully deleted old rows.')
except Error as e:
Utility.show_error('Database delete', e)
finally:
self.conn.close()
def get_transaction_list(self, file_name, transaction_name, contains):
try:
self.conn = self.create_connection()
cursor = self.conn.cursor()
qry = '''SELECT TRANSACTION_ID , NAME FROM TRANSACTION_LIST WHERE FILE_NAME = ?'''
name_only_qry = '''SELECT TL.TRANSACTION_ID, TL.NAME
FROM TRANSACTION_LIST TL WHERE TL.FILE_NAME = ? AND UPPER(TL.NAME) LIKE UPPER(?) '''
transaction_only_qry = ''' SELECT TL2.TRANSACTION_ID, TL2.NAME FROM TRANSACTION_LIST TL2
WHERE TL2.FILE_NAME = ? AND TL2.TRANSACTION_ID IN (SELECT TD.TRANSACTION_ID
FROM TRANSACTION_DTL TD
WHERE TD.FILE_NAME = TL2.FILE_NAME AND TD.TRANSACTION_ID = TL2.TRANSACTION_ID
AND UPPER(TD.SEGMENT) LIKE UPPER(?) )'''
if (not transaction_name) and (not contains): # most common operation
cursor.execute(qry, [file_name])
elif transaction_name and contains:
search_name = Connection.create_search_string(transaction_name)
transaction_content = Connection.create_search_string(contains)
qry = name_only_qry + ' UNION ' + transaction_only_qry
# print(qry)
cursor.execute(qry, [file_name, search_name, file_name, transaction_content])
elif transaction_name:
search_name = Connection.create_search_string(transaction_name)
cursor.execute(name_only_qry, [file_name, search_name])
else:
transaction_content = Connection.create_search_string(contains)
cursor.execute(transaction_only_qry, [file_name, transaction_content])
return cursor.fetchall()
except Error as e:
Utility.show_error('Database Transaction List', e)
return [] # return empty list
finally:
self.conn.close()
def get_transaction_detail(self, file_name, transact_id):
try:
self.conn = self.create_connection()
qry = '''SELECT SEGMENT FROM TRANSACTION_DTL WHERE FILE_NAME = ?
AND TRANSACTION_ID = ? ORDER BY SEGMENT_ID ASC'''
cursor = self.conn.cursor()
cursor.execute(qry, [file_name, transact_id])
return cursor.fetchall()
except Error as e:
Utility.show_error('Database Error', e)
return [] # return empty detail list
finally:
self.conn.close()
def header_trailer_rows(self, details):
try:
self.conn = self.create_connection()
qry = '''INSERT INTO TRANSACTION_HDR(FILE_NAME, TRANSACTION_ID, SEGMENT_ID, SEGMENT) VALUES(?, ?, ?, ?)'''
cursor = self.conn.cursor()
# for value in values:
cursor.executemany(qry, details)
self.conn.commit()
except Error as e:
Utility.show_error('Database Error', e)
print(e)
finally:
self.conn.close()
def update_current_theme(self, theme_id):
try:
self.conn = self.create_connection()
qry = '''UPDATE THEME_TBL SET IS_SELECTED = 0'''
cursor = self.conn.cursor()
cursor.execute(qry)
qry = '''UPDATE THEME_TBL SET IS_SELECTED = 1 WHERE THEME_ID = ?'''
cursor.execute(qry, [theme_id])
self.conn.commit()
except Exception as e:
Utility.show_error('Updating Theme', e)
finally:
self.conn.close()
def get_current_theme(self):
try:
self.conn = self.create_connection()
qry = '''SELECT THEME_ID, THEME FROM THEME_TBL WHERE IS_SELECTED = 1'''
cursor = self.conn.cursor()
cursor.execute(qry)
return cursor.fetchone()
except Exception as e:
Utility.show_error('Error Getting Theme', e)
finally:
self.conn.close()
@staticmethod
def create_search_string(search_string):
if search_string:
try:
index = search_string.find('%')
if index < 0: # no wild card, add one to search string
_search_string = search_string.split()
new_search_string = '%'
for search_item in _search_string:
new_search_string += search_item + '%'
return new_search_string
else: # search as typed
return search_string
except Exception as e:
Utility.show_error('String Concat Error', e)
return ''
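        # Hedged sketch of create_search_string behaviour (inputs made up):
        #   "ship notice" -> "%ship%notice%"  (each word wrapped for a SQL LIKE match)
        #   "ship%"       -> "ship%"          (already contains a wildcard, used as typed)
        #   ""            -> ""               (empty input falls through to the final return)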
| 41.705584 | 120 | 0.568038 |
5d08daf6fd9baa7577f3307e27a45051ed35317e
| 774 |
py
|
Python
|
02_collatz.py
|
tommirrington/52167-Programming-and-Scripting
|
b1c6d7d4c641abe67ccee4051f969c975dea3555
|
[
"Apache-2.0"
] | null | null | null |
02_collatz.py
|
tommirrington/52167-Programming-and-Scripting
|
b1c6d7d4c641abe67ccee4051f969c975dea3555
|
[
"Apache-2.0"
] | null | null | null |
02_collatz.py
|
tommirrington/52167-Programming-and-Scripting
|
b1c6d7d4c641abe67ccee4051f969c975dea3555
|
[
"Apache-2.0"
] | null | null | null |
# Tom Mirrington 2018-02-12
# Exercise 2 submission
#prompt user for an integer
n = int(input("Please enter a positive integer: "))
#satisfy the condition that n is a positive integer
#determine if the value is an even number, if even then divide by 2
#if the value is not an even number or 1 then it must be odd therefore multiply by three and add one
while n >= 1:
if n == 1:
break
elif n % 2 == 0:
n = int(n/2)
print(n)
else:
n = (n * 3) + 1
print(n)
#References
#Collatz Conjecture https://en.wikipedia.org/wiki/Collatz_conjecture
#GMIT 52167 Programming and Scripting course material https://learnonline.gmit.ie/course/view.php?id=3940
#The Python Tutorial https://docs.python.org/3/tutorial/
| 27.642857 | 105 | 0.669251 |
f031ea1a505bff35c6c383cc610f1f60dfb63f84
| 8,668 |
py
|
Python
|
src/oci/opsi/models/update_operations_insights_warehouse_user_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/opsi/models/update_operations_insights_warehouse_user_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/opsi/models/update_operations_insights_warehouse_user_details.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateOperationsInsightsWarehouseUserDetails(object):
"""
The information to be updated.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateOperationsInsightsWarehouseUserDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param connection_password:
The value to assign to the connection_password property of this UpdateOperationsInsightsWarehouseUserDetails.
:type connection_password: str
:param is_awr_data_access:
The value to assign to the is_awr_data_access property of this UpdateOperationsInsightsWarehouseUserDetails.
:type is_awr_data_access: bool
:param is_em_data_access:
The value to assign to the is_em_data_access property of this UpdateOperationsInsightsWarehouseUserDetails.
:type is_em_data_access: bool
:param is_opsi_data_access:
The value to assign to the is_opsi_data_access property of this UpdateOperationsInsightsWarehouseUserDetails.
:type is_opsi_data_access: bool
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateOperationsInsightsWarehouseUserDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateOperationsInsightsWarehouseUserDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'connection_password': 'str',
'is_awr_data_access': 'bool',
'is_em_data_access': 'bool',
'is_opsi_data_access': 'bool',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'connection_password': 'connectionPassword',
'is_awr_data_access': 'isAwrDataAccess',
'is_em_data_access': 'isEmDataAccess',
'is_opsi_data_access': 'isOpsiDataAccess',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._connection_password = None
self._is_awr_data_access = None
self._is_em_data_access = None
self._is_opsi_data_access = None
self._freeform_tags = None
self._defined_tags = None
@property
def connection_password(self):
"""
Gets the connection_password of this UpdateOperationsInsightsWarehouseUserDetails.
User provided connection password for the AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.
:return: The connection_password of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: str
"""
return self._connection_password
@connection_password.setter
def connection_password(self, connection_password):
"""
Sets the connection_password of this UpdateOperationsInsightsWarehouseUserDetails.
User provided connection password for the AWR Data, Enterprise Manager Data and Operations Insights OPSI Hub.
:param connection_password: The connection_password of this UpdateOperationsInsightsWarehouseUserDetails.
:type: str
"""
self._connection_password = connection_password
@property
def is_awr_data_access(self):
"""
Gets the is_awr_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to AWR data.
:return: The is_awr_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: bool
"""
return self._is_awr_data_access
@is_awr_data_access.setter
def is_awr_data_access(self, is_awr_data_access):
"""
Sets the is_awr_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to AWR data.
:param is_awr_data_access: The is_awr_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:type: bool
"""
self._is_awr_data_access = is_awr_data_access
@property
def is_em_data_access(self):
"""
Gets the is_em_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to EM data.
:return: The is_em_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: bool
"""
return self._is_em_data_access
@is_em_data_access.setter
def is_em_data_access(self, is_em_data_access):
"""
Sets the is_em_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to EM data.
:param is_em_data_access: The is_em_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:type: bool
"""
self._is_em_data_access = is_em_data_access
@property
def is_opsi_data_access(self):
"""
Gets the is_opsi_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to OPSI data.
:return: The is_opsi_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: bool
"""
return self._is_opsi_data_access
@is_opsi_data_access.setter
def is_opsi_data_access(self, is_opsi_data_access):
"""
Sets the is_opsi_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
Indicate whether user has access to OPSI data.
:param is_opsi_data_access: The is_opsi_data_access of this UpdateOperationsInsightsWarehouseUserDetails.
:type: bool
"""
self._is_opsi_data_access = is_opsi_data_access
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateOperationsInsightsWarehouseUserDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateOperationsInsightsWarehouseUserDetails.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this UpdateOperationsInsightsWarehouseUserDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateOperationsInsightsWarehouseUserDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this UpdateOperationsInsightsWarehouseUserDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateOperationsInsightsWarehouseUserDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this UpdateOperationsInsightsWarehouseUserDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 37.686957 | 245 | 0.696931 |
a747db14e2471e19c7374237ecb0c7d0d314391e
| 767 |
py
|
Python
|
python/ql/test/experimental/dataflow/fieldflow/test.py
|
saumyaagrawal2001/codeql
|
ec9ef968b7ad54b40a8976776653cac3c4d1e0b5
|
[
"MIT"
] | 2 |
2021-06-13T07:04:31.000Z
|
2021-06-13T07:04:34.000Z
|
python/ql/test/experimental/dataflow/fieldflow/test.py
|
minato371000/codeql
|
339c0721c5e1ac25bea75dd4168616ec488c4238
|
[
"MIT"
] | null | null | null |
python/ql/test/experimental/dataflow/fieldflow/test.py
|
minato371000/codeql
|
339c0721c5e1ac25bea75dd4168616ec488c4238
|
[
"MIT"
] | null | null | null |
from python.ql.test.experimental.dataflow.testDefinitions import *
# Preamble
class MyObj(object):
def __init__(self, foo):
self.foo = foo
class NestedObj(object):
def __init__(self):
self.obj = MyObj("OK")
def getObj(self):
return self.obj
def setFoo(obj, x):
SINK_F(obj.foo)
obj.foo = x
def test_example1():
myobj = MyObj("OK")
setFoo(myobj, SOURCE)
SINK(myobj.foo)
def test_example2():
x = SOURCE
a = NestedObj()
a.obj.foo = x
a.getObj().foo = x
SINK(a.obj.foo)
def test_example3():
obj = MyObj(SOURCE)
SINK(obj.foo)
def fields_with_local_flow(x):
obj = MyObj(x)
a = obj.foo
return a
def test_fields():
SINK(fields_with_local_flow(SOURCE))
| 13.45614 | 66 | 0.614081 |
de6a8f5195db87b7b19984cb908685e9749e3743
| 3,100 |
py
|
Python
|
data/p2DJ/New/R2/benchmark/startQiskit_QC48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startQiskit_QC48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startQiskit_QC48.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=10
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.cx(input_qubit[0],input_qubit[1]) # number=4
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.x(input_qubit[1]) # number=8
prog.cx(input_qubit[0],input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=6
prog.x(input_qubit[1]) # number=3
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit_QC48.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.181818 | 82 | 0.624516 |
82f9b77686b2bdd65f64f9b076ac595d7b250d43
| 3,183 |
py
|
Python
|
Python/selenium webscrapping/country_check.py
|
SitramSoft/programming_practice
|
5c4131582634b5dc5a586be891bf645ab7dc5f9a
|
[
"MIT"
] | null | null | null |
Python/selenium webscrapping/country_check.py
|
SitramSoft/programming_practice
|
5c4131582634b5dc5a586be891bf645ab7dc5f9a
|
[
"MIT"
] | null | null | null |
Python/selenium webscrapping/country_check.py
|
SitramSoft/programming_practice
|
5c4131582634b5dc5a586be891bf645ab7dc5f9a
|
[
"MIT"
] | null | null | null |
# Check if USA is member of WHO website
# - go to https://www.who.int/
# - click on "Countries"
# - click on "All Countries"
# - Filter by region: type 'americas' and click on 'Region of Americas'
# - Assert if 'United States of America' is on the page
# Prerequisites:
# Installing Selenium libraries - https://www.selenium.dev/documentation/en/selenium_installation/installing_selenium_libraries/
# Installing WebDriver binaries - https://www.selenium.dev/documentation/en/selenium_installation/installing_webdriver_binaries/
# Selenium requires a driver to interface with the chosen browser.
# Chrome: https://sites.google.com/a/chromium.org/chromedriver/downloads
# Edge: https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
# Firefox: https://github.com/mozilla/geckodriver/releases
# Safari: https://webkit.org/blog/6900/webdriver-support-in-safari-10/
from selenium import webdriver
from pyvirtualdisplay import Display
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
# The url under test
url = "https://www.who.int/"
# Creating the browser object using the ChromeDriver
browser_options = Options()
browser_options.add_argument("--headless")
browser = webdriver.Chrome(options=browser_options)
# maximize browser window to avoid possible issues with elements not visible
browser.maximize_window()
# Directing the browser to the defined url
browser.get(url)
actions = ActionChains(browser)
# click Countries hover menu in top nav bar
countries_menu = browser.find_element_by_xpath('/html/body/div[3]/header/div[3]/div/div[1]/ul/li[3]/a')
actions.move_to_element(countries_menu)
actions.click(countries_menu).perform()
# Click All Countries submenu in top nav bar
all_countries_menu = browser.find_element_by_xpath('/html/body/div[3]/header/div[3]/div/div[2]/div[3]/div[2]/div/div/div/div[1]/div/div/div/ul/li[1]/a')
actions.move_to_element(all_countries_menu).perform()
all_countries_menu.click()
# find Filter by region input box
filter_by_region = browser.find_element_by_xpath('/html/body/div[3]/section/div[2]/div/div/div[2]/div[2]/div[1]/div/div/div/div[1]/div/span/span/input')
# type 'americas'
filter_by_region.send_keys('americas')
# wait until 'Region of Americas' is visible
try:
WebDriverWait(browser,5).until(
EC.presence_of_element_located((By.XPATH, '//ul[@id="filter-by-region_listbox" and @aria-hidden="false"]'))
)
except TimeoutException:
browser.quit()
exit(1)
# send key down to select 'Region of Americas'
filter_by_region.send_keys(Keys.ARROW_DOWN)
# send key Enter
filter_by_region.send_keys(Keys.ENTER)
# Assert if 'United States of America' is on the page
text_to_search = 'United States of America'
assert text_to_search in browser.page_source,"'United States of America' not found on page"
#close browser
browser.quit()
| 39.7875 | 152 | 0.775055 |
ce380dc9ea814c3484731d61037d8fa7818e0693
| 22,484 |
py
|
Python
|
airflow/contrib/utils/gcp_field_validator.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 3 |
2019-03-28T05:59:39.000Z
|
2019-10-03T22:05:25.000Z
|
airflow/contrib/utils/gcp_field_validator.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 7 |
2019-03-27T07:58:14.000Z
|
2020-02-12T17:42:33.000Z
|
airflow/contrib/utils/gcp_field_validator.py
|
Ryan-Miao/airflow
|
a2aca8714fac014ed7da97229d7877f1bc6e5a59
|
[
"Apache-2.0"
] | 2 |
2018-11-01T22:36:10.000Z
|
2019-11-23T13:36:53.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Validator for body fields sent via GCP API.
The validator performs validation of the body (a dictionary of fields) that
is sent in the API request to Google Cloud (usually via the googleclient API).
Context
-------
The specification mostly focuses on helping Airflow DAG developers in the development
phase. You can build your own GCP operator (such as GcfDeployOperator for example) which
can have built-in validation specification for the particular API. It's super helpful
when a developer plays with different fields and their values in the initial phase of
DAG development. Most of the Google Cloud APIs perform their own validation on the
server side, but most of the requests are asynchronous and you need to wait for the
result of the operation. This takes precious time and slows
down iteration over the API. BodyFieldValidator is meant to be used on the client side
and it should therefore provide instant feedback to the developer on misspelled or
wrong type of parameters.
The validation should be performed in the "execute()" method call in order to allow
template parameters to be expanded before validation is performed.
Types of fields
---------------
Specification is an array of dictionaries - each dictionary describes field, its type,
validation, optionality, api_version supported and nested fields (for unions and dicts).
Typically (for clarity and in order to aid syntax highlighting) the array of
dicts should be defined as series of dict() executions. Fragment of example
specification might look as follows::
    SPECIFICATION = [
       dict(name="an_union", type="union", optional=True, fields=[
           dict(name="variant_1", type="dict"),
           dict(name="variant_2", regexp=r'^.+$', api_version='v1beta2'),
       ]),
       dict(name="a_dict", type="dict", fields=[
           dict(name="field_1", type="dict"),
           dict(name="field_2", regexp=r'^.+$'),
       ]),
       ...
    ]
Each field should have key = "name" indicating field name. The field can be of one of the
following types:
* Dict fields: (key = "type", value="dict"):
Field of this type should contain nested fields in form of an array of dicts.
Each of the fields in the array is then expected (unless marked as optional)
and validated recursively. If an extra field is present in the dictionary, warning is
printed in log file (but the validation succeeds - see the Forward-compatibility notes)
* List fields: (key = "type", value="list"):
Field of this type should be a list. Only the type correctness is validated.
The contents of a list are not subject to validation.
* Union fields (key = "type", value="union"): field of this type should contain nested
fields in form of an array of dicts. One of the fields (and only one) should be
present (unless the union is marked as optional). If more than one union field is
present, FieldValidationException is raised. If none of the union fields is
present - warning is printed in the log (see below Forward-compatibility notes).
* Fields validated for non-emptiness: (key = "allow_empty") - this applies only to
  fields whose value is a string, and it allows checking for non-emptiness of
the field (allow_empty=False).
* Regexp-validated fields: (key = "regexp") - fields of this type are assumed to be
strings and they are validated with the regexp specified. Remember that the regexps
should ideally contain ^ at the beginning and $ at the end to make sure that
the whole field content is validated. Typically such regexp
validations should be used carefully and sparingly (see Forward-compatibility
notes below).
* Custom-validated fields: (key = "custom_validation") - fields of this type are validated
using method specified via custom_validation field. Any exception thrown in the custom
validation will be turned into FieldValidationException and will cause validation to
fail. Such custom validations might be used to check numeric fields (including
ranges of values), booleans or any other types of fields.
* API version: (key="api_version") if API version is specified, then the field will only
  be validated when the api_version used at field validator initialization matches exactly
  the version specified. If you want to declare fields that are available in several
versions of the APIs, you should specify the field as many times as many API versions
should be supported (each time with different API version).
* if none of the keys ("type", "regexp", "custom_validation") is present - the field is not validated
You can see some of the field examples in EXAMPLE_VALIDATION_SPECIFICATION.
Forward-compatibility notes
---------------------------
Certain decisions are crucial to allow the client APIs to work also with future API
versions. Since the body attached is passed to the API call as-is, it is entirely
possible to pass through any new fields in the body (for future API versions) -
albeit without validation on the client side - and they can, and usually will, still be
validated on the server side.
Here are the guidelines that you should follow to make validation forward-compatible:
* most of the fields are not validated for their content. It's possible to use regexp
in some specific cases that are guaranteed not to change in the future, but for most
fields regexp validation should be r'^.+$' indicating check for non-emptiness
* api_version is not validated - user can pass any future version of the api here. The API
version is only used to filter parameters that are marked as present in this api version
* any new (not present in the specification) fields in the body are allowed (not verified).
  For dictionaries, new fields can be added to dictionaries by future calls. However if an
  unknown field is added to a dictionary, a warning is logged by the client (but validation
  remains successful). This is a very nice feature to protect against typos in names.
* For unions, newly added union variants can be added by future calls and they will
pass validation, however the content or presence of those fields will not be validated.
This means that it’s possible to send a new non-validated union field together with an
old validated field and this problem will not be detected by the client. In such case
warning will be printed.
* When you add validator to an operator, you should also add ``validate_body`` parameter
(default = True) to __init__ of such operators - when it is set to False,
no validation should be performed. This is a safeguard for totally unpredicted and
backwards-incompatible changes that might sometimes occur in the APIs.
"""
import re
from typing import Sequence, Dict, Callable
from airflow import LoggingMixin, AirflowException
COMPOSITE_FIELD_TYPES = ['union', 'dict', 'list']
class GcpFieldValidationException(AirflowException):
"""Thrown when validation finds dictionary field not valid according to specification.
"""
class GcpValidationSpecificationException(AirflowException):
"""Thrown when validation specification is wrong.
This should only happen during development as ideally
specification itself should not be invalid ;) .
"""
def _int_greater_than_zero(value):
if int(value) <= 0:
raise GcpFieldValidationException("The available memory has to be greater than 0")
EXAMPLE_VALIDATION_SPECIFICATION = [
dict(name="name", allow_empty=False),
dict(name="description", allow_empty=False, optional=True),
dict(name="availableMemoryMb", custom_validation=_int_greater_than_zero,
optional=True),
dict(name="labels", optional=True, type="dict"),
dict(name="an_union", type="union", fields=[
dict(name="variant_1", regexp=r'^.+$'),
dict(name="variant_2", regexp=r'^.+$', api_version='v1beta2'),
dict(name="variant_3", type="dict", fields=[
dict(name="url", regexp=r'^.+$')
]),
dict(name="variant_4")
]),
]
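# A minimal usage sketch (body values are made up; note that union variants such as
# "variant_1" live at the top level of the body - the union only groups them in the spec):
#
#   validator = GcpBodyFieldValidator(EXAMPLE_VALIDATION_SPECIFICATION, api_version='v1')
#   body = {"name": "function-1", "variant_1": "some value"}
#   validator.validate(body)  # raises GcpFieldValidationException if the body is invalid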
class GcpBodyFieldValidator(LoggingMixin):
"""Validates correctness of request body according to specification.
The specification can describe various type of
fields including custom validation, and union of fields. This validator is
to be reusable by various operators. See the EXAMPLE_VALIDATION_SPECIFICATION
for some examples and explanations of how to create specification.
:param validation_specs: dictionary describing validation specification
:type validation_specs: list[dict]
:param api_version: Version of the api used (for example v1)
:type api_version: str
"""
def __init__(self, validation_specs: Sequence[str], api_version: str) -> None:
super().__init__()
self._validation_specs = validation_specs
self._api_version = api_version
@staticmethod
def _get_field_name_with_parent(field_name, parent):
if parent:
return parent + '.' + field_name
return field_name
@staticmethod
def _sanity_checks(children_validation_specs: Dict, field_type: str, full_field_path: str,
regexp: str, allow_empty: bool, custom_validation: Callable, value) -> None:
if value is None and field_type != 'union':
raise GcpFieldValidationException(
"The required body field '{}' is missing. Please add it.".
format(full_field_path))
if regexp and field_type:
raise GcpValidationSpecificationException(
"The validation specification entry '{}' has both type and regexp. "
"The regexp is only allowed without type (i.e. assume type is 'str' "
"that can be validated with regexp)".format(full_field_path))
if allow_empty is not None and field_type:
raise GcpValidationSpecificationException(
"The validation specification entry '{}' has both type and allow_empty. "
"The allow_empty is only allowed without type (i.e. assume type is 'str' "
"that can be validated with allow_empty)".format(full_field_path))
if children_validation_specs and field_type not in COMPOSITE_FIELD_TYPES:
raise GcpValidationSpecificationException(
"Nested fields are specified in field '{}' of type '{}'. "
"Nested fields are only allowed for fields of those types: ('{}').".
format(full_field_path, field_type, COMPOSITE_FIELD_TYPES))
if custom_validation and field_type:
raise GcpValidationSpecificationException(
"The validation specification field '{}' has both type and "
"custom_validation. Custom validation is only allowed without type.".
format(full_field_path))
@staticmethod
def _validate_regexp(full_field_path: str, regexp: str, value: str) -> None:
if not re.match(regexp, value):
            # Note: we only match from the beginning, as we assume the regexps are all-or-nothing
raise GcpFieldValidationException(
"The body field '{}' of value '{}' does not match the field "
"specification regexp: '{}'.".
format(full_field_path, value, regexp))
@staticmethod
def _validate_is_empty(full_field_path: str, value: str) -> None:
if not value:
raise GcpFieldValidationException(
"The body field '{}' can't be empty. Please provide a value."
.format(full_field_path, value))
def _validate_dict(self, children_validation_specs: Dict, full_field_path: str, value: Dict) -> None:
for child_validation_spec in children_validation_specs:
self._validate_field(validation_spec=child_validation_spec,
dictionary_to_validate=value,
parent=full_field_path)
all_dict_keys = [spec['name'] for spec in children_validation_specs]
for field_name in value.keys():
if field_name not in all_dict_keys:
self.log.warning(
"The field '%s' is in the body, but is not specified in the "
"validation specification '%s'. "
"This might be because you are using newer API version and "
"new field names defined for that version. Then the warning "
"can be safely ignored, or you might want to upgrade the operator"
"to the version that supports the new API version.",
self._get_field_name_with_parent(field_name, full_field_path),
children_validation_specs)
def _validate_union(self, children_validation_specs: Dict, full_field_path: str,
dictionary_to_validate: Dict) -> None:
field_found = False
found_field_name = None
for child_validation_spec in children_validation_specs:
# Forcing optional so that we do not have to type optional = True
# in specification for all union fields
new_field_found = self._validate_field(
validation_spec=child_validation_spec,
dictionary_to_validate=dictionary_to_validate,
parent=full_field_path,
force_optional=True)
field_name = child_validation_spec['name']
if new_field_found and field_found:
raise GcpFieldValidationException(
"The mutually exclusive fields '{}' and '{}' belonging to the "
"union '{}' are both present. Please remove one".
format(field_name, found_field_name, full_field_path))
if new_field_found:
field_found = True
found_field_name = field_name
if not field_found:
self.log.warning(
"There is no '%s' union defined in the body %s. "
"Validation expected one of '%s' but could not find any. It's possible "
"that you are using newer API version and there is another union variant "
"defined for that version. Then the warning can be safely ignored, "
"or you might want to upgrade the operator to the version that "
"supports the new API version.",
full_field_path, dictionary_to_validate,
[field['name'] for field in children_validation_specs])
def _validate_field(self, validation_spec, dictionary_to_validate, parent=None,
force_optional=False):
"""
Validates if field is OK.
:param validation_spec: specification of the field
:type validation_spec: dict
:param dictionary_to_validate: dictionary where the field should be present
:type dictionary_to_validate: dict
:param parent: full path of parent field
:type parent: str
:param force_optional: forces the field to be optional
(all union fields have force_optional set to True)
:type force_optional: bool
:return: True if the field is present
"""
field_name = validation_spec['name']
field_type = validation_spec.get('type')
optional = validation_spec.get('optional')
regexp = validation_spec.get('regexp')
allow_empty = validation_spec.get('allow_empty')
children_validation_specs = validation_spec.get('fields')
required_api_version = validation_spec.get('api_version')
custom_validation = validation_spec.get('custom_validation')
full_field_path = self._get_field_name_with_parent(field_name=field_name,
parent=parent)
if required_api_version and required_api_version != self._api_version:
self.log.debug(
"Skipping validation of the field '%s' for API version '%s' "
"as it is only valid for API version '%s'",
field_name, self._api_version, required_api_version)
return False
value = dictionary_to_validate.get(field_name)
if (optional or force_optional) and value is None:
self.log.debug("The optional field '%s' is missing. That's perfectly OK.", full_field_path)
return False
# Certainly down from here the field is present (value is not None)
# so we should only return True from now on
self._sanity_checks(children_validation_specs=children_validation_specs,
field_type=field_type,
full_field_path=full_field_path,
regexp=regexp,
allow_empty=allow_empty,
custom_validation=custom_validation,
value=value)
if allow_empty is False:
self._validate_is_empty(full_field_path, value)
if regexp:
self._validate_regexp(full_field_path, regexp, value)
elif field_type == 'dict':
if not isinstance(value, dict):
raise GcpFieldValidationException(
"The field '{}' should be of dictionary type according to the "
"specification '{}' but it is '{}'".
format(full_field_path, validation_spec, value))
if children_validation_specs is None:
self.log.debug(
"The dict field '%s' has no nested fields defined in the "
"specification '%s'. That's perfectly ok - it's content will "
"not be validated.", full_field_path, validation_spec)
else:
self._validate_dict(children_validation_specs, full_field_path, value)
elif field_type == 'union':
if not children_validation_specs:
raise GcpValidationSpecificationException(
"The union field '{}' has no nested fields "
"defined in specification '{}'. Unions should have at least one "
"nested field defined.".format(full_field_path, validation_spec))
self._validate_union(children_validation_specs, full_field_path,
dictionary_to_validate)
elif field_type == 'list':
if not isinstance(value, list):
raise GcpFieldValidationException(
"The field '{}' should be of list type according to the "
"specification '{}' but it is '{}'".
format(full_field_path, validation_spec, value))
elif custom_validation:
try:
custom_validation(value)
except Exception as e:
raise GcpFieldValidationException(
"Error while validating custom field '{}' specified by '{}': '{}'".
format(full_field_path, validation_spec, e))
elif field_type is None:
self.log.debug("The type of field '%s' is not specified in '%s'. "
"Not validating its content.", full_field_path, validation_spec)
else:
raise GcpValidationSpecificationException(
"The field '{}' is of type '{}' in specification '{}'."
"This type is unknown to validation!".format(
full_field_path, field_type, validation_spec))
return True
def validate(self, body_to_validate):
"""
Validates if the body (dictionary) follows specification that the validator was
instantiated with. Raises ValidationSpecificationException or
ValidationFieldException in case of problems with specification or the
body not conforming to the specification respectively.
:param body_to_validate: body that must follow the specification
:type body_to_validate: dict
:return: None
"""
try:
for validation_spec in self._validation_specs:
self._validate_field(validation_spec=validation_spec,
dictionary_to_validate=body_to_validate)
except GcpFieldValidationException as e:
raise GcpFieldValidationException(
"There was an error when validating: body '{}': '{}'".
format(body_to_validate, e))
all_field_names = [spec['name'] for spec in self._validation_specs
if spec.get('type') != 'union' and
spec.get('api_version') != self._api_version]
all_union_fields = [spec for spec in self._validation_specs
if spec.get('type') == 'union']
for union_field in all_union_fields:
all_field_names.extend(
[nested_union_spec['name'] for nested_union_spec in union_field['fields']
if nested_union_spec.get('type') != 'union' and
nested_union_spec.get('api_version') != self._api_version])
for field_name in body_to_validate.keys():
if field_name not in all_field_names:
self.log.warning(
"The field '%s' is in the body, but is not specified in the "
"validation specification '%s'. "
"This might be because you are using newer API version and "
"new field names defined for that version. Then the warning "
"can be safely ignored, or you might want to upgrade the operator"
"to the version that supports the new API version.",
field_name, self._validation_specs)
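# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of how the validation methods above might be driven. The class
# name GcpBodyFieldValidator and its constructor arguments are assumptions: the
# class definition and __init__ are outside this excerpt, so treat this as a
# sketch of the intended call pattern rather than the exact API.
if __name__ == "__main__":
    SPECIFICATION = [
        dict(name="name", allow_empty=False),
        dict(name="description", optional=True),
        dict(name="labels", type="dict", optional=True, fields=[
            dict(name="airflow-version", optional=True),
        ]),
    ]
    # Assumed constructor: (validation_specs, api_version)
    validator = GcpBodyFieldValidator(SPECIFICATION, api_version="v1")
    # Raises GcpFieldValidationException if the body does not match the specification
    validator.validate({"name": "instance-1", "labels": {"airflow-version": "v2-2-0"}})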
avg_line_length: 51.926097 | max_line_length: 105 | alphanum_fraction: 0.665762
hexsha: ade262ad607ce248e470ac45933df37b204f11f2 | size: 5382 | ext: py | lang: Python
max_stars_repo_path: tests/generator/test_rom.py | max_stars_repo_name: venustar1228/cactus-blockchain | max_stars_repo_head_hexsha: a2fec724eb19489898ffa306d4203de11edb94b0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 20 | max_stars_repo_stars_event_min_datetime: 2021-07-16T18:08:13.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-20T02:38:39.000Z
max_issues_repo_path: tests/generator/test_rom.py | max_issues_repo_name: Cactus-Network/cactus-blockchain | max_issues_repo_head_hexsha: 9eef13171dff764bd0549de1479d775272e16bcc | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 29 | max_issues_repo_issues_event_min_datetime: 2021-07-17T00:38:18.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-29T19:11:48.000Z
max_forks_repo_path: tests/generator/test_rom.py | max_forks_repo_name: venustar1228/cactus-blockchain | max_forks_repo_head_hexsha: a2fec724eb19489898ffa306d4203de11edb94b0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 21 | max_forks_repo_forks_event_min_datetime: 2021-07-17T02:18:57.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-15T08:26:56.000Z
content:
from clvm_tools import binutils
from clvm_tools.clvmc import compile_clvm_text
from cactus.full_node.generator import run_generator
from cactus.full_node.mempool_check_conditions import get_name_puzzle_conditions
from cactus.types.blockchain_format.program import Program, SerializedProgram
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.condition_with_args import ConditionWithArgs
from cactus.types.name_puzzle_condition import NPC
from cactus.types.generator_types import BlockGenerator, GeneratorArg
from cactus.util.clvm import int_to_bytes
from cactus.util.condition_tools import ConditionOpcode
from cactus.util.ints import uint32
from cactus.wallet.puzzles.load_clvm import load_clvm
MAX_COST = int(1e15)
COST_PER_BYTE = int(12000)
DESERIALIZE_MOD = load_clvm("cactuslisp_deserialisation.clvm", package_or_requirement="cactus.wallet.puzzles")
GENERATOR_CODE = """
(mod (deserialize-mod historical-generators)
(defun first-block (deserialize-mod historical-generators)
(a deserialize-mod (list (f historical-generators))))
(defun second-block (deserialize-mod historical-generators)
(a deserialize-mod (r historical-generators)))
(defun go (deserialize-mod historical-generators)
(c (first-block deserialize-mod historical-generators)
(second-block deserialize-mod historical-generators)
))
(go deserialize-mod historical-generators)
)
"""
COMPILED_GENERATOR_CODE = bytes.fromhex(
"ff02ffff01ff04ffff02ff04ffff04ff02ffff04ff05ffff04ff0bff8080808080ffff02"
"ff06ffff04ff02ffff04ff05ffff04ff0bff808080808080ffff04ffff01ffff02ff05ff"
"1380ff02ff05ff2b80ff018080"
)
COMPILED_GENERATOR_CODE = bytes(Program.to(compile_clvm_text(GENERATOR_CODE, [])))
FIRST_GENERATOR = Program.to(
binutils.assemble('((parent_id (c 1 (q "puzzle blob")) 50000 "solution is here" extra data for coin))')
).as_bin()
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
FIRST_GENERATOR = Program.to(
binutils.assemble(
"""
((0x0000000000000000000000000000000000000000000000000000000000000000 1 50000
((51 0x0000000000000000000000000000000000000000000000000000000000000001 500)) "extra" "data" "for" "coin" ))"""
)
).as_bin()
SECOND_GENERATOR = Program.to(binutils.assemble("(extra data for block)")).as_bin()
def to_sp(sexp) -> SerializedProgram:
return SerializedProgram.from_bytes(bytes(sexp))
def block_generator() -> BlockGenerator:
generator_args = [GeneratorArg(uint32(0), to_sp(FIRST_GENERATOR)), GeneratorArg(uint32(1), to_sp(SECOND_GENERATOR))]
return BlockGenerator(to_sp(COMPILED_GENERATOR_CODE), generator_args)
EXPECTED_ABBREVIATED_COST = 108379
EXPECTED_COST = 113415
EXPECTED_OUTPUT = (
"ffffffa00000000000000000000000000000000000000000000000000000000000000000"
"ff01ff8300c350ffffff33ffa00000000000000000000000000000000000000000000000"
"000000000000000001ff8201f48080ff856578747261ff8464617461ff83666f72ff8463"
"6f696e8080ff856578747261ff8464617461ff83666f72ff85626c6f636b80"
)
class TestROM:
def test_rom_inputs(self):
# this test checks that the generator just works
# It's useful for debugging the generator prior to having the ROM invoke it.
args = Program.to([DESERIALIZE_MOD, [FIRST_GENERATOR, SECOND_GENERATOR]])
sp = to_sp(COMPILED_GENERATOR_CODE)
cost, r = sp.run_with_cost(MAX_COST, args)
assert cost == EXPECTED_ABBREVIATED_COST
assert r.as_bin().hex() == EXPECTED_OUTPUT
def test_get_name_puzzle_conditions(self):
# this tests that extra block or coin data doesn't confuse `get_name_puzzle_conditions`
gen = block_generator()
cost, r = run_generator(gen, max_cost=MAX_COST)
print(r)
npc_result = get_name_puzzle_conditions(gen, max_cost=MAX_COST, cost_per_byte=COST_PER_BYTE, safe_mode=False)
assert npc_result.error is None
assert npc_result.clvm_cost == EXPECTED_COST
cond_1 = ConditionWithArgs(ConditionOpcode.CREATE_COIN, [bytes([0] * 31 + [1]), int_to_bytes(500), b""])
CONDITIONS = [
(ConditionOpcode.CREATE_COIN, [cond_1]),
]
npc = NPC(
coin_name=bytes32.fromhex("e8538c2d14f2a7defae65c5c97f5d4fae7ee64acef7fec9d28ad847a0880fd03"),
puzzle_hash=bytes32.fromhex("9dcf97a184f32623d11a73124ceb99a5709b083721e878a16d78f596718ba7b2"),
conditions=CONDITIONS,
)
assert npc_result.npc_list == [npc]
def test_coin_extras(self):
# the ROM supports extra data after a coin. This test checks that it actually gets passed through
gen = block_generator()
cost, r = run_generator(gen, max_cost=MAX_COST)
coin_spends = r.first()
for coin_spend in coin_spends.as_iter():
extra_data = coin_spend.rest().rest().rest().rest()
assert extra_data.as_atom_list() == b"extra data for coin".split()
def test_block_extras(self):
# the ROM supports extra data after the coin spend list. This test checks that it actually gets passed through
gen = block_generator()
cost, r = run_generator(gen, max_cost=MAX_COST)
extra_block_data = r.rest()
assert extra_block_data.as_atom_list() == b"extra data for block".split()
avg_line_length: 39.573529 | max_line_length: 120 | alphanum_fraction: 0.745448
hexsha: be4e4f72b3ce4e27129f6b40c3cad23771a3024b | size: 4811 | ext: py | lang: Python
max_stars_repo_path: video_processing_mediapipe.py | max_stars_repo_name: crisdeodates/AI-OSSDC-VisionAI-Core | max_stars_repo_head_hexsha: 17c3b480ef41add5bd58093a59ccea41035791e0 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 66 | max_stars_repo_stars_event_min_datetime: 2019-04-02T00:05:36.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-20T07:18:45.000Z
max_issues_repo_path: video_processing_mediapipe.py | max_issues_repo_name: liflo/OSSDC-VisionAI-Core | max_issues_repo_head_hexsha: 17c3b480ef41add5bd58093a59ccea41035791e0 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: video_processing_mediapipe.py | max_forks_repo_name: liflo/OSSDC-VisionAI-Core | max_forks_repo_head_hexsha: 17c3b480ef41add5bd58093a59ccea41035791e0 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 17 | max_forks_repo_forks_event_min_datetime: 2021-02-08T22:52:28.000Z | max_forks_repo_forks_event_max_datetime: 2022-01-15T11:52:14.000Z
content:
import traceback
import cv2
import numpy as np
import sys
import argparse
from datetime import datetime
import os
# MediaPipe algorithms in Python
# https://google.github.io/mediapipe/getting_started/python.html
# Install steps:
# pip install mediapipe
# Status: working
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
mp_hands = mp.solutions.hands
mp_pose = mp.solutions.pose
mp_holistic = mp.solutions.holistic
def init_model(transform):
if transform == "facemesh":
face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
return (face_mesh,drawing_spec), None
elif transform == "hands":
hands = mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5)
return (hands), None
elif transform == "pose":
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
return (pose), None
elif transform == "holistic":
holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)
return (holistic), None
return None,None
def process_image(transform,processing_model,img):
tracks = []
try:
if transform == "facemesh":
(face_mesh,drawing_spec) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
img = image
tracks = results.multi_face_landmarks
elif transform == "hands":
(hands) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
img = image
tracks = results.multi_hand_landmarks
elif transform == "pose":
(pose) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = pose.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
img = image
            tracks = results.pose_landmarks
elif transform == "holistic":
(holistic) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = holistic.process(image)
            # Draw the face, hand and pose annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
img = image
# return just face landmarks for now
            tracks = results.face_landmarks
except Exception as e:
track = traceback.format_exc()
print(track)
print("MediaPipe Exception",e)
pass
return tracks,img
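# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how init_model() and process_image() above might be driven
# from a webcam loop. The camera index, window title, and the choice of the
# "facemesh" transform are arbitrary assumptions for illustration.
if __name__ == "__main__":
    transform = "facemesh"
    model, _ = init_model(transform)
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        tracks, annotated = process_image(transform, model, frame)
        cv2.imshow("OSSDC VisionAI - MediaPipe", annotated)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()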
avg_line_length: 37.585938 | max_line_length: 98 | alphanum_fraction: 0.672417
hexsha: 0a597c0d4bc8fc167b8d3a90a218cf16ec688899 | size: 7035 | ext: py | lang: Python
max_stars_repo_path: Lib/site-packages/PyQt5/examples/itemviews/simpledommodel.py | max_stars_repo_name: heylenz/python27 | max_stars_repo_head_hexsha: bee49fa9d65b8ab7d591146a5b6cd47aeb41d940 | max_stars_repo_licenses: ["bzip2-1.0.6", "MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-11-09T23:56:54.000Z | max_stars_repo_stars_event_max_datetime: 2021-07-29T23:15:59.000Z
max_issues_repo_path: PyQt5_gpl-5.8/examples/itemviews/simpledommodel.py | max_issues_repo_name: ArjandeV/iracing-overlay | max_issues_repo_head_hexsha: 6286348d78f1538f64928ec867cafc65124eea3d | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: PyQt5_gpl-5.8/examples/itemviews/simpledommodel.py | max_forks_repo_name: ArjandeV/iracing-overlay | max_forks_repo_head_hexsha: 6286348d78f1538f64928ec867cafc65124eea3d | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-02-14T21:43:29.000Z | max_forks_repo_forks_event_max_datetime: 2020-02-14T21:43:29.000Z
content:
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt5.QtCore import QAbstractItemModel, QFile, QIODevice, QModelIndex, Qt
from PyQt5.QtWidgets import QApplication, QFileDialog, QMainWindow, QTreeView
from PyQt5.QtXml import QDomDocument
class DomItem(object):
def __init__(self, node, row, parent=None):
self.domNode = node
# Record the item's location within its parent.
self.rowNumber = row
self.parentItem = parent
self.childItems = {}
def node(self):
return self.domNode
def parent(self):
return self.parentItem
def child(self, i):
if i in self.childItems:
return self.childItems[i]
if i >= 0 and i < self.domNode.childNodes().count():
childNode = self.domNode.childNodes().item(i)
childItem = DomItem(childNode, i, self)
self.childItems[i] = childItem
return childItem
return None
def row(self):
return self.rowNumber
class DomModel(QAbstractItemModel):
def __init__(self, document, parent=None):
super(DomModel, self).__init__(parent)
self.domDocument = document
self.rootItem = DomItem(self.domDocument, 0)
def columnCount(self, parent):
return 3
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
node = item.node()
attributes = []
attributeMap = node.attributes()
if index.column() == 0:
return node.nodeName()
elif index.column() == 1:
for i in range(0, attributeMap.count()):
attribute = attributeMap.item(i)
attributes.append(attribute.nodeName() + '="' +
attribute.nodeValue() + '"')
return " ".join(attributes)
if index.column() == 2:
value = node.nodeValue()
if value is None:
return ''
return ' '.join(node.nodeValue().split('\n'))
return None
def flags(self, index):
if not index.isValid():
return Qt.NoItemFlags
return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
if section == 0:
return "Name"
if section == 1:
return "Attributes"
if section == 2:
return "Value"
return None
def index(self, row, column, parent):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QModelIndex()
def parent(self, child):
if not child.isValid():
return QModelIndex()
childItem = child.internalPointer()
parentItem = childItem.parent()
if not parentItem or parentItem == self.rootItem:
return QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.rootItem
else:
parentItem = parent.internalPointer()
return parentItem.node().childNodes().count()
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction("&Open...", self.openFile, "Ctrl+O")
self.fileMenu.addAction("E&xit", self.close, "Ctrl+Q")
self.xmlPath = ""
self.model = DomModel(QDomDocument(), self)
self.view = QTreeView(self)
self.view.setModel(self.model)
self.setCentralWidget(self.view)
self.setWindowTitle("Simple DOM Model")
def openFile(self):
filePath, _ = QFileDialog.getOpenFileName(self, "Open File",
self.xmlPath, "XML files (*.xml);;HTML files (*.html);;"
"SVG files (*.svg);;User Interface files (*.ui)")
if filePath:
f = QFile(filePath)
if f.open(QIODevice.ReadOnly):
document = QDomDocument()
if document.setContent(f):
newModel = DomModel(document, self)
self.view.setModel(newModel)
self.model = newModel
self.xmlPath = filePath
f.close()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
window = MainWindow()
window.resize(640, 480)
window.show()
sys.exit(app.exec_())
avg_line_length: 31.266667 | max_line_length: 78 | alphanum_fraction: 0.603127
hexsha: 23342b18e21c9f8799a4dff86aba7126d1ce09c2 | size: 1110 | ext: py | lang: Python
max_stars_repo_path: src/app/beer_garden/api/http/handlers/vbeta/event.py | max_stars_repo_name: hazmat345/beer-garden | max_stars_repo_head_hexsha: a5fd3eee303d80b6a16d93c89fe8ff42fe39bfbd | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/app/beer_garden/api/http/handlers/vbeta/event.py | max_issues_repo_name: hazmat345/beer-garden | max_issues_repo_head_hexsha: a5fd3eee303d80b6a16d93c89fe8ff42fe39bfbd | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/app/beer_garden/api/http/handlers/vbeta/event.py | max_forks_repo_name: hazmat345/beer-garden | max_forks_repo_head_hexsha: a5fd3eee303d80b6a16d93c89fe8ff42fe39bfbd | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from brewtils.schema_parser import SchemaParser
from beer_garden.api.http.authorization import Permissions, authenticated
from beer_garden.api.http.base_handler import BaseHandler
from beer_garden.events import publish
class EventPublisherAPI(BaseHandler):
parser = SchemaParser()
@authenticated(permissions=[Permissions.CREATE])
def post(self):
"""
---
summary: Publish a new event
parameters:
- name: bg-namespace
in: header
required: false
description: Namespace to use
type: string
- name: event
in: body
            description: The Event object
schema:
$ref: '#/definitions/Event'
responses:
204:
description: An Event has been published
400:
$ref: '#/definitions/400Error'
50x:
$ref: '#/definitions/50xError'
tags:
- Event
"""
publish(SchemaParser.parse_event(self.request.decoded_body, from_string=True))
self.set_status(204)
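# --- Illustrative client call (not part of the original file) ---
# A hedged sketch of how a client might publish an event through this handler.
# The host, port, URL path, header value, and the minimal Event body below are
# assumptions for illustration only: the actual route is registered elsewhere in
# beer-garden and the Event schema is defined by brewtils, neither of which is
# shown in this excerpt.
if __name__ == "__main__":
    import json
    import requests

    response = requests.post(
        "http://localhost:2337/vbeta/events",          # hypothetical host and route
        data=json.dumps({"name": "REQUEST_CREATED"}),  # hypothetical minimal Event body
        headers={"bg-namespace": "default"},
    )
    print(response.status_code)  # 204 is expected on success, per the docstring above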
avg_line_length: 27.073171 | max_line_length: 86 | alphanum_fraction: 0.592793