function_name
stringlengths 1
63
| docstring
stringlengths 50
5.89k
| masked_code
stringlengths 50
882k
| implementation
stringlengths 169
12.9k
| start_line
int32 1
14.6k
| end_line
int32 16
14.6k
| file_content
stringlengths 274
882k
|
---|---|---|---|---|---|---|
__init__
|
Keyword args:
all_for_sec (int): The length of time to keep the specified snapshots. Measured in seconds.
days (int): The number of days to keep the snapshots after the `all_for_sec` period has passed.
per_day (int): The number of snapshots to keep per day after the `all_for_sec` period has passed.
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_1 import models
class RetentionPolicy(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its Swagger-declared type.
    swagger_types = {
        'all_for_sec': 'int',
        'days': 'int',
        'per_day': 'int'
    }
    # Maps each model attribute to its key in the JSON definition
    # (identical here, but kept for generator consistency).
    attribute_map = {
        'all_for_sec': 'all_for_sec',
        'days': 'days',
        'per_day': 'per_day'
    }
    # No constructor arguments are required for this model.
    required_args = {
    }

    # MASKED: __init__ function (lines 47-64)

    def __setattr__(self, key, value):
        # Reject assignment of any attribute not declared in attribute_map,
        # so a typo cannot silently add an unknown field to the model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RetentionPolicy`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset swagger fields are represented by Property placeholders;
            # surface them as missing attributes (so hasattr() is False)
            # instead of leaking the placeholder object.
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize any model objects inside lists.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize any model objects stored as
                    # dict values.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated safety net: if this class ever subclasses dict, include
        # its own items too.
        if issubclass(RetentionPolicy, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RetentionPolicy):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
def __init__(
    self,
    all_for_sec=None,  # type: int
    days=None,  # type: int
    per_day=None,  # type: int
):
    """
    Keyword args:
        all_for_sec (int): The length of time to keep the specified snapshots. Measured in seconds.
        days (int): The number of days to keep the snapshots after the `all_for_sec` period has passed.
        per_day (int): The number of snapshots to keep per day after the `all_for_sec` period has passed.
    """
    # Assign only the keyword arguments the caller actually supplied; unset
    # attributes must stay absent so that to_dict()/hasattr() skip them.
    # setattr goes through the model's __setattr__ key validation, exactly
    # like a direct attribute assignment would.
    supplied = (
        ('all_for_sec', all_for_sec),
        ('days', days),
        ('per_day', per_day),
    )
    for attr_name, attr_value in supplied:
        if attr_value is not None:
            setattr(self, attr_name, attr_value)
| 47 | 64 |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_1 import models
class RetentionPolicy(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps each model attribute to its Swagger-declared type.
    swagger_types = {
        'all_for_sec': 'int',
        'days': 'int',
        'per_day': 'int'
    }
    # Maps each model attribute to its key in the JSON definition
    # (identical here, but kept for generator consistency).
    attribute_map = {
        'all_for_sec': 'all_for_sec',
        'days': 'days',
        'per_day': 'per_day'
    }
    # No constructor arguments are required for this model.
    required_args = {
    }

    def __init__(
        self,
        all_for_sec=None,  # type: int
        days=None,  # type: int
        per_day=None,  # type: int
    ):
        """
        Keyword args:
            all_for_sec (int): The length of time to keep the specified snapshots. Measured in seconds.
            days (int): The number of days to keep the snapshots after the `all_for_sec` period has passed.
            per_day (int): The number of snapshots to keep per day after the `all_for_sec` period has passed.
        """
        # Only assign attributes that were explicitly provided; unset
        # attributes must remain absent so to_dict() can skip them.
        if all_for_sec is not None:
            self.all_for_sec = all_for_sec
        if days is not None:
            self.days = days
        if per_day is not None:
            self.per_day = per_day

    def __setattr__(self, key, value):
        # Reject assignment of any attribute not declared in attribute_map,
        # so a typo cannot silently add an unknown field to the model.
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `RetentionPolicy`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            # Unset swagger fields are represented by Property placeholders;
            # surface them as missing attributes (so hasattr() is False)
            # instead of leaking the placeholder object.
            raise AttributeError
        else:
            return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    # Recursively serialize any model objects inside lists.
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    # Recursively serialize any model objects stored as
                    # dict values.
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        # Generated safety net: if this class ever subclasses dict, include
        # its own items too.
        if issubclass(RetentionPolicy, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RetentionPolicy):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
_CreateTopic
|
Assures that a topic exists, creating it if necessary.
Also adds GCS as a publisher on that topic, if necessary.
Args:
pubsub_topic: name of the Cloud Pub/Sub topic to use/create.
service_account: the GCS service account that needs publish permission.
Returns:
true if we modified IAM permissions, otherwise false.
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import getopt
import re
import time
import uuid
from datetime import datetime
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PublishPermissionDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.project_id import PopulateProjectId
from gslib.pubsub_api import PubsubApi
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.pubsub_apitools.pubsub_v1_messages import Binding
from gslib.utils import copy_helper
# Cloud Pub/Sub commands
_LIST_SYNOPSIS = """
gsutil notification list bucket_url...
"""
_DELETE_SYNOPSIS = """
gsutil notification delete (notificationConfigName|bucket_url)...
"""
_CREATE_SYNOPSIS = """
gsutil notification create -f (json|none) [-p prefix] [-t topic] \\
[-m key:value]... [-e eventType]... bucket_url
"""
# Object Change Notification commands
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = (
_CREATE_SYNOPSIS +
_DELETE_SYNOPSIS.lstrip('\n') +
_LIST_SYNOPSIS.lstrip('\n') +
_WATCHBUCKET_SYNOPSIS +
_STOPCHANNEL_SYNOPSIS.lstrip('\n') + '\n') # yapf: disable
_LIST_DESCRIPTION = """
<B>LIST</B>
The list sub-command provides a list of notification configs belonging to a
given bucket. The listed name of each notification config can be used with
the delete sub-command to delete that specific notification config.
For listing Object Change Notifications instead of Cloud Pub/Sub notification
subscription configs, add a -o flag.
<B>LIST EXAMPLES</B>
Fetch the list of notification configs for the bucket example-bucket:
gsutil notification list gs://example-bucket
The same as above, but for Object Change Notifications instead of Cloud
Pub/Sub notification subscription configs:
gsutil notification list -o gs://example-bucket
Fetch the notification configs in all buckets matching a wildcard:
gsutil notification list gs://example-*
Fetch all of the notification configs for buckets in the default project:
gsutil notification list gs://*
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The delete sub-command deletes notification configs from a bucket. If a
notification config name is passed as a parameter, that notification config
alone will be deleted. If a bucket name is passed, all notification configs
associated with that bucket will be deleted.
Cloud Pub/Sub topics associated with this notification config will not be
deleted by this command. Those must be deleted separately, for example with
the gcloud command `gcloud beta pubsub topics delete`.
Object Change Notification subscriptions cannot be deleted with this command.
For that, see the command `gsutil notification stopchannel`.
<B>DELETE EXAMPLES</B>
Delete a single notification config (with ID 3) in the bucket example-bucket:
gsutil notification delete projects/_/buckets/example-bucket/notificationConfigs/3
Delete all notification configs in the bucket example-bucket:
gsutil notification delete gs://example-bucket
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The create sub-command creates a notification config on a bucket, establishing
a flow of event notifications from Cloud Storage to a Cloud Pub/Sub topic. As
part of creating this flow, the create command also verifies that the
destination Cloud Pub/Sub topic exists, creating it if necessary, and verifies
that the Cloud Storage bucket has permission to publish events to that topic,
granting the permission if necessary.
If a destination Cloud Pub/Sub topic is not specified with the -t flag, Cloud
Storage will by default choose a topic name in the default project whose ID is
the same the bucket name. For example, if the default project ID specified is
'default-project' and the bucket being configured is gs://example-bucket, the
create command will use the Cloud Pub/Sub topic
"projects/default-project/topics/example-bucket".
In order to enable notifications, a `special Cloud Storage service account
<https://cloud.google.com/storage/docs/projects#service-accounts>`_ unique to
each project must have the IAM permission "projects.topics.publish". This
command will check to see if that permission exists and, if not, will attempt
to grant it.
You can create multiple notification configurations for a bucket, but their
triggers cannot overlap such that a single event could send multiple
notifications. Attempting to create a notification configuration that
overlaps with an existing notification configuration results in an error.
<B>CREATE EXAMPLES</B>
Begin sending notifications of all changes to the bucket example-bucket
to the Cloud Pub/Sub topic projects/default-project/topics/example-bucket:
gsutil notification create -f json gs://example-bucket
The same as above, but specifies the destination topic ID 'files-to-process'
in the default project:
gsutil notification create -f json \\
-t files-to-process gs://example-bucket
The same as above, but specifies a Cloud Pub/Sub topic belonging to the
specific cloud project 'example-project':
gsutil notification create -f json \\
-t projects/example-project/topics/files-to-process gs://example-bucket
Create a notification config that will only send an event when a new object
has been created:
gsutil notification create -f json -e OBJECT_FINALIZE gs://example-bucket
Create a topic and notification config that will only send an event when
an object beginning with "photos/" is affected:
gsutil notification create -p photos/ gs://example-bucket
List all of the notificationConfigs in bucket example-bucket:
gsutil notification list gs://example-bucket
Delete all notificationConfigs for bucket example-bucket:
gsutil notification delete gs://example-bucket
Delete one specific notificationConfig for bucket example-bucket:
gsutil notification delete \\
projects/_/buckets/example-bucket/notificationConfigs/1
<B>OPTIONS</B>
The create sub-command has the following options
-e Specify an event type filter for this notification config. Cloud
Storage will only send notifications of this type. You may specify
this parameter multiple times to allow multiple event types. If not
specified, Cloud Storage will send notifications for all event
types. The valid types are:
OBJECT_FINALIZE - An object has been created.
OBJECT_METADATA_UPDATE - The metadata of an object has changed.
OBJECT_DELETE - An object has been permanently deleted.
OBJECT_ARCHIVE - A live Cloud Storage object has been archived.
-f Specifies the payload format of notification messages. Must be
either "json" for a payload matches the object metadata for the
JSON API, or "none" to specify no payload at all. In either case,
notification details are available in the message attributes.
-m Specifies a key:value attribute that will be appended to the set
of attributes sent to Cloud Pub/Sub for all events associated with
this notification config. You may specify this parameter multiple
times to set multiple attributes.
-p Specifies a prefix path filter for this notification config. Cloud
Storage will only send notifications for objects in this bucket
whose names begin with the specified prefix.
-s Skips creation and permission assignment of the Cloud Pub/Sub topic.
This is useful if the caller does not have permission to access
the topic in question, or if the topic already exists and has the
appropriate publish permission assigned.
-t The Cloud Pub/Sub topic to which notifications should be sent. If
not specified, this command will choose a topic whose project is
your default project and whose ID is the same as the Cloud Storage
bucket name.
<B>NEXT STEPS</B>
Once the create command has succeeded, Cloud Storage will publish a message to
the specified Cloud Pub/Sub topic when eligible changes occur. In order to
receive these messages, you must create a Pub/Sub subscription for your
Pub/Sub topic. To learn more about creating Pub/Sub subscriptions, see `the
Pub/Sub Subscriber Overview <https://cloud.google.com/pubsub/docs/subscriber>`_.
You can create a simple Pub/Sub subscription using the ``gcloud`` command-line
tool. For example, to create a new subscription on the topic "myNewTopic" and
attempt to pull messages from it, you could run:
gcloud beta pubsub subscriptions create --topic myNewTopic testSubscription
gcloud beta pubsub subscriptions pull --auto-ack testSubscription
"""
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See `Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
<B>WATCHBUCKET EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
<B>STOPCHANNEL EXAMPLES</B>
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
"""
_DESCRIPTION = """
The notification command is used to configure Google Cloud Storage support for
sending notifications to Cloud Pub/Sub as well as to configure the object
change notification feature.
<B>CLOUD PUB/SUB</B>
The "create", "list", and "delete" sub-commands deal with configuring Cloud
Storage integration with Google Cloud Pub/Sub.
""" + _CREATE_DESCRIPTION + _LIST_DESCRIPTION + _DELETE_DESCRIPTION + """
<B>OBJECT CHANGE NOTIFICATIONS</B>
For more information on the Object Change Notification feature, please see
`the Object Change Notification docs
<https://cloud.google.com/storage/docs/object-change-notification>`_.
The "watchbucket" and "stopchannel" sub-commands enable and disable Object
Change Notifications.
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file. Alternately, your subscriber code can filter out gsutil's
parallel composite uploads by ignoring any notification about objects whose
names contain (but do not start with) the following string:
"{composite_namespace}".
""".format(composite_namespace=copy_helper.PARALLEL_UPLOAD_TEMP_NAMESPACE)
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see
`Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_.
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
# yapf: disable
_create_help_text = (
CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION))
_list_help_text = (
CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION))
_delete_help_text = (
CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION))
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
# yapf: enable
# Maps the user-facing -f flag values to the API's payload-format enum names.
PAYLOAD_FORMAT_MAP = {
    'none': 'NONE',
    'json': 'JSON_API_V1',
}
class NotificationCommand(Command):
"""Implementation of gsutil notification command."""
# Notification names might look like one of these:
# canonical form: projects/_/buckets/bucket/notificationConfigs/3
# JSON API form: b/bucket/notificationConfigs/5
# Either of the above might start with a / if a user is copying & pasting.
def _GetNotificationPathRegex(self):
    """Returns the compiled notification-config path regex, compiling once."""
    cached = NotificationCommand._notification_path_regex
    if not cached:
        # Accepts the canonical form, the JSON API form, and an optional
        # leading '/' from copy & paste (see class comment above).
        cached = re.compile(
            ('/?(projects/[^/]+/)?b(uckets)?/(?P<bucket>[^/]+)/'
             'notificationConfigs/(?P<notification>[0-9]+)'))
        NotificationCommand._notification_path_regex = cached
    return cached

# Populated lazily by _GetNotificationPathRegex on first use.
_notification_path_regex = None
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
    'notification',
    command_name_aliases=[
        'notify',
        'notifyconfig',
        'notifications',
        'notif',
    ],
    usage_synopsis=_SYNOPSIS,
    min_args=2,
    max_args=NO_MAX,
    # NOTE: 't:' was previously listed twice ('i:t:m:t:of:e:p:s'); getopt
    # treats duplicate entries identically, so listing it once is equivalent.
    supported_sub_args='i:t:m:of:e:p:s',
    file_url_ok=False,
    provider_url_ok=False,
    urls_start_arg=1,
    gs_api_support=[ApiSelector.JSON],
    gs_default_api=ApiSelector.JSON,
    argparse_arguments={
        'watchbucket': [
            CommandArgument.MakeFreeTextArgument(),
            CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
        ],
        'stopchannel': [],
        'list': [
            CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
        ],
        'delete': [
            # Takes a list of one of the following:
            #   notification: projects/_/buckets/bla/notificationConfigs/5,
            #   bucket: gs://foobar
            CommandArgument.MakeZeroOrMoreCloudURLsArgument(),
        ],
        'create': [
            CommandArgument.MakeFreeTextArgument(),  # Cloud Pub/Sub topic
            CommandArgument.MakeNCloudBucketURLsArgument(1),
        ]
    },
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
    help_name='notification',
    help_name_aliases=[
        'watchbucket',
        'stopchannel',
        'notifyconfig',
    ],
    help_type='command_help',
    help_one_line_summary='Configure object change notification',
    help_text=_DETAILED_HELP_TEXT,
    # Per-subcommand help shown by `gsutil help notification <subcommand>`.
    subcommand_help_text={
        'create': _create_help_text,
        'list': _list_help_text,
        'delete': _delete_help_text,
        'watchbucket': _watchbucket_help_text,
        'stopchannel': _stopchannel_help_text,
    },
)
def _WatchBucket(self):
    """Creates a watch on a bucket given in self.args.

    Expects self.args[0] to be an https:// application URL and the last
    argument to be a gs:// bucket URL. Optional -i sets the channel
    identifier (random UUID otherwise); -t sets a client state token.

    Returns:
      0 on success.

    Raises:
      CommandException: for a non-https URL or a non-gs:// bucket.
      AccessDeniedException: re-raised after logging authorization guidance.
    """
    self.CheckArguments()
    identifier = None
    client_token = None
    if self.sub_opts:
        for o, a in self.sub_opts:
            if o == '-i':
                identifier = a
            if o == '-t':
                client_token = a
    # Fall back to a random channel identifier when -i is not supplied.
    identifier = identifier or str(uuid.uuid4())
    watch_url = self.args[0]
    bucket_arg = self.args[-1]
    if not watch_url.lower().startswith('https://'):
        raise CommandException('The application URL must be an https:// URL.')
    bucket_url = StorageUrlFromString(bucket_arg)
    if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
        raise CommandException(
            'The %s command can only be used with gs:// bucket URLs.' %
            self.command_name)
    # (A second bare IsBucket() check in the original was unreachable —
    # the combined check above already guarantees IsBucket() — and has
    # been removed.)
    self.logger.info('Watching bucket %s with application URL %s ...',
                     bucket_url, watch_url)
    try:
        channel = self.gsutil_api.WatchBucket(bucket_url.bucket_name,
                                              watch_url,
                                              identifier,
                                              token=client_token,
                                              provider=bucket_url.scheme)
    except AccessDeniedException as e:
        # logger.warn is a deprecated alias of logger.warning.
        self.logger.warning(
            NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(
                watch_error=str(e), watch_url=watch_url))
        raise
    channel_id = channel.id
    resource_id = channel.resourceId
    client_token = channel.token
    self.logger.info('Successfully created watch notification channel.')
    self.logger.info('Watch channel identifier: %s', channel_id)
    self.logger.info('Canonicalized resource identifier: %s', resource_id)
    self.logger.info('Client state token: %s', client_token)
    return 0
def _StopChannel(self):
    """Stops an Object Change Notification channel.

    Expects self.args to hold [channel_id, resource_id], as returned by a
    prior `watchbucket` request.

    Returns:
      0 on success.
    """
    channel_id = self.args[0]
    resource_id = self.args[1]
    self.logger.info('Removing channel %s with resource identifier %s ...',
                     channel_id, resource_id)
    self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
    # Fixed typo in the original message ('Succesfully').
    self.logger.info('Successfully removed channel.')
    return 0
def _ListChannels(self, bucket_arg):
    """Lists active channel watches on a bucket given in self.args.

    Args:
      bucket_arg: gs:// bucket URL string to list channels for.

    Returns:
      0 on success.

    Raises:
      CommandException: if bucket_arg is not a gs:// bucket URL.
    """
    bucket_url = StorageUrlFromString(bucket_arg)
    if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
        raise CommandException(
            'The %s command can only be used with gs:// bucket URLs.' %
            self.command_name)
    # (A second bare IsBucket() check in the original was unreachable —
    # the combined check above already guarantees IsBucket() — and has
    # been removed.)
    channels = self.gsutil_api.ListChannels(bucket_url.bucket_name,
                                            provider='gs').items
    self.logger.info(
        'Bucket %s has the following active Object Change Notifications:',
        bucket_url.bucket_name)
    for idx, channel in enumerate(channels):
        self.logger.info('\tNotification channel %d:', idx + 1)
        self.logger.info('\t\tChannel identifier: %s', channel.channel_id)
        self.logger.info('\t\tResource identifier: %s', channel.resource_id)
        self.logger.info('\t\tApplication URL: %s', channel.push_url)
        self.logger.info('\t\tCreated by: %s', channel.subscriber_email)
        # creation_time_ms is epoch milliseconds; convert to seconds.
        self.logger.info(
            '\t\tCreation time: %s',
            str(datetime.fromtimestamp(channel.creation_time_ms / 1000)))
    return 0
def _Create(self):
    """Creates a notification config on a bucket (the `create` subcommand).

    Parses the -e/-f/-m/-p/-s/-t sub-options, optionally creates the
    destination Cloud Pub/Sub topic and grants GCS publish permission on it,
    then creates the notification config, retrying once if freshly granted
    permissions have not yet propagated.

    Returns:
      0 on success.

    Raises:
      CommandException: for a missing/invalid -f value, a malformed -m
          attribute, or a non-gs:// bucket argument.
      PublishPermissionDeniedException: if publishing still fails after the
          retry.
    """
    self.CheckArguments()

    # User-specified options
    pubsub_topic = None
    payload_format = None
    custom_attributes = {}
    event_types = []
    object_name_prefix = None
    should_setup_topic = True

    if self.sub_opts:
        for o, a in self.sub_opts:
            if o == '-e':
                event_types.append(a)
            elif o == '-f':
                payload_format = a
            elif o == '-m':
                if ':' not in a:
                    raise CommandException(
                        'Custom attributes specified with -m should be of the form '
                        'key:value')
                # Split on the first ':' only, so attribute values may
                # themselves contain colons (the original a.split(':')
                # raised ValueError for e.g. -m url:https://x).
                key, value = a.split(':', 1)
                custom_attributes[key] = value
            elif o == '-p':
                object_name_prefix = a
            elif o == '-s':
                should_setup_topic = False
            elif o == '-t':
                pubsub_topic = a

    if payload_format not in PAYLOAD_FORMAT_MAP:
        raise CommandException(
            "Must provide a payload format with -f of either 'json' or 'none'")
    payload_format = PAYLOAD_FORMAT_MAP[payload_format]

    bucket_arg = self.args[-1]

    bucket_url = StorageUrlFromString(bucket_arg)
    if not bucket_url.IsCloudUrl() or not bucket_url.IsBucket():
        raise CommandException(
            "%s %s requires a GCS bucket name, but got '%s'" %
            (self.command_name, self.subcommand_name, bucket_arg))
    if bucket_url.scheme != 'gs':
        raise CommandException(
            'The %s command can only be used with gs:// bucket URLs.' %
            self.command_name)
    bucket_name = bucket_url.bucket_name
    self.logger.debug('Creating notification for bucket %s', bucket_url)

    # Find the project this bucket belongs to
    bucket_metadata = self.gsutil_api.GetBucket(bucket_name,
                                                fields=['projectNumber'],
                                                provider=bucket_url.scheme)
    bucket_project_number = bucket_metadata.projectNumber

    # If not specified, choose a sensible default for the Cloud Pub/Sub topic
    # name.
    if not pubsub_topic:
        pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
                                                  bucket_name)
    if not pubsub_topic.startswith('projects/'):
        # If a user picks a topic ID (mytopic) but doesn't pass the whole name (
        # projects/my-project/topics/mytopic ), pick a default project.
        pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
                                                  pubsub_topic)
    self.logger.debug('Using Cloud Pub/Sub topic %s', pubsub_topic)

    just_modified_topic_permissions = False
    if should_setup_topic:
        # Ask GCS for the email address that represents GCS's permission to
        # publish to a Cloud Pub/Sub topic from this project.
        service_account = self.gsutil_api.GetProjectServiceAccount(
            bucket_project_number, provider=bucket_url.scheme).email_address
        self.logger.debug('Service account for project %d: %s',
                          bucket_project_number, service_account)
        just_modified_topic_permissions = self._CreateTopic(
            pubsub_topic, service_account)

    for attempt_number in range(2):
        try:
            create_response = self.gsutil_api.CreateNotificationConfig(
                bucket_name,
                pubsub_topic=pubsub_topic,
                payload_format=payload_format,
                custom_attributes=custom_attributes,
                event_types=event_types if event_types else None,
                object_name_prefix=object_name_prefix,
                provider=bucket_url.scheme)
            break
        except PublishPermissionDeniedException:
            if attempt_number == 0 and just_modified_topic_permissions:
                # If we have just set the IAM policy, it may take up to 10 seconds to
                # take effect.
                self.logger.info(
                    'Retrying create notification in 10 seconds '
                    '(new permissions may take up to 10 seconds to take effect.)')
                time.sleep(10)
            else:
                raise

    notification_name = 'projects/_/buckets/%s/notificationConfigs/%s' % (
        bucket_name, create_response.id)
    self.logger.info('Created notification config %s', notification_name)
    return 0
# MASKED: _CreateTopic function (lines 645-689)
def _EnumerateNotificationsFromArgs(self, accept_notification_configs=True):
    """Yields bucket/notification tuples from command-line args.

    Given a list of strings that are bucket names (gs://foo) or notification
    config IDs, yield tuples of bucket names and their associated
    notifications.

    Args:
      accept_notification_configs: whether notification configs are valid args.

    Yields:
      Tuples of the form (bucket_name, Notification)

    Raises:
      CommandException: for non-gs:// or object-level arguments.
      NotFoundException: if a named notification config does not exist.
    """
    path_regex = self._GetNotificationPathRegex()

    for list_entry in self.args:
        match = path_regex.match(list_entry)
        if match:
            if not accept_notification_configs:
                raise CommandException(
                    '%s %s accepts only bucket names, but you provided %s' %
                    (self.command_name, self.subcommand_name, list_entry))
            bucket_name = match.group('bucket')
            notification_id = match.group('notification')
            # Find the one config whose id matches the argument.
            found = False
            for notification in self.gsutil_api.ListNotificationConfigs(
                    bucket_name, provider='gs'):
                if notification.id == notification_id:
                    yield (bucket_name, notification)
                    found = True
                    break
            if not found:
                raise NotFoundException('Could not find notification %s' % list_entry)
        else:
            storage_url = StorageUrlFromString(list_entry)
            if not storage_url.IsCloudUrl():
                raise CommandException(
                    'The %s command must be used on cloud buckets or notification '
                    'config names.' % self.command_name)
            if storage_url.scheme != 'gs':
                # The original omitted the format argument here, so the
                # message printed a literal '%s'; supply command_name.
                raise CommandException(
                    'The %s command only works on gs:// buckets.' %
                    self.command_name)
            path = None
            if storage_url.IsProvider():
                # Bare gs:// means "every bucket in the default project".
                path = 'gs://*'
            elif storage_url.IsBucket():
                path = list_entry
            if not path:
                raise CommandException(
                    'The %s command cannot be used on cloud objects, only buckets' %
                    self.command_name)
            for blr in self.WildcardIterator(path).IterBuckets(
                    bucket_fields=['id']):
                for notification in self.gsutil_api.ListNotificationConfigs(
                        blr.storage_url.bucket_name, provider='gs'):
                    yield (blr.storage_url.bucket_name, notification)
def _List(self):
    """Lists notification configs, or OCN channels when -o is given.

    NOTE(review): the source dump lost indentation, so the pairing of this
    `else` is reconstructed; attaching it to the outer `if` matches observed
    gsutil behavior (plain `notification list` with no flags lists Cloud
    Pub/Sub configs) — confirm against upstream.
    """
    self.CheckArguments()
    if self.sub_opts:
        if '-o' in dict(self.sub_opts):
            # -o: list Object Change Notification channels per bucket arg.
            for bucket_name in self.args:
                self._ListChannels(bucket_name)
    else:
        # Default: list Cloud Pub/Sub notification configs.
        for bucket_name, notification in self._EnumerateNotificationsFromArgs(
                accept_notification_configs=False):
            self._PrintNotificationDetails(bucket_name, notification)
    return 0
def _PrintNotificationDetails(self, bucket, notification):
    """Prints a human-readable summary of one notification config."""
    # Strip the '//pubsub.googleapis.com/' prefix from the topic resource
    # name so the user sees 'projects/.../topics/...'.
    print('projects/_/buckets/{bucket}/notificationConfigs/{notification}\n'
          '\tCloud Pub/Sub topic: {topic}'.format(
              bucket=bucket,
              notification=notification.id,
              topic=notification.topic[len('//pubsub.googleapis.com/'):]))
    if notification.custom_attributes:
        print('\tCustom attributes:')
        for attr in notification.custom_attributes.additionalProperties:
            print('\t\t%s: %s' % (attr.key, attr.value))
    # Collect filter lines first so the 'Filters:' header is only printed
    # when at least one filter is set.
    filters = []
    if notification.event_types:
        filters.append('\t\tEvent Types: %s' %
                       ', '.join(notification.event_types))
    if notification.object_name_prefix:
        filters.append("\t\tObject name prefix: '%s'" %
                       notification.object_name_prefix)
    if filters:
        print('\tFilters:')
        for line in filters:
            print(line)
    # Blank log line as a separator between entries.
    self.logger.info('')
def _Delete(self):
for bucket_name, notification in self._EnumerateNotificationsFromArgs():
self._DeleteNotification(bucket_name, notification.id)
return 0
def _DeleteNotification(self, bucket_name, notification_id):
self.gsutil_api.DeleteNotificationConfig(bucket_name,
notification=notification_id,
provider='gs')
return 0
def _RunSubCommand(self, func):
  """Parses suboptions, logs them, then dispatches to a subcommand handler.

  Args:
    func: Unbound subcommand method (from SUBCOMMANDS) to invoke with self.

  Returns:
    The subcommand's return code.
  """
  try:
    self.sub_opts, self.args = getopt.getopt(
        self.args, self.command_spec.supported_sub_args)
    # Commands with both suboptions and subcommands need to reparse for
    # suboptions, so we log again.
    metrics.LogCommandParams(sub_opts=self.sub_opts)
    return func(self)
  except getopt.GetoptError:
    self.RaiseInvalidArgumentException()
# Dispatch table mapping each subcommand name to its (unbound) handler
# method; RunCommand looks the handler up here and invokes it through
# _RunSubCommand, which parses suboptions first.
SUBCOMMANDS = {
    'create': _Create,
    'list': _List,
    'delete': _Delete,
    'watchbucket': _WatchBucket,
    'stopchannel': _StopChannel
}
def RunCommand(self):
  """Command entry point for the notification command."""
  self.subcommand_name = self.args.pop(0)
  handler = NotificationCommand.SUBCOMMANDS.get(self.subcommand_name)
  if handler is None:
    raise CommandException('Invalid subcommand "%s" for the %s command.' %
                           (self.subcommand_name, self.command_name))
  metrics.LogCommandParams(subcommands=[self.subcommand_name])
  return self._RunSubCommand(handler)
|
def _CreateTopic(self, pubsub_topic, service_account):
  """Assures that a topic exists, creating it if necessary.

  Also adds GCS as a publisher on that bucket, if necessary.

  Args:
    pubsub_topic: name of the Cloud Pub/Sub topic to use/create.
    service_account: the GCS service account that needs publish permission.

  Returns:
    true if we modified IAM permissions, otherwise false.
  """
  pubsub_api = PubsubApi(logger=self.logger)

  # Verify that the Pub/Sub topic exists. If it does not, create it.
  try:
    pubsub_api.GetTopic(topic_name=pubsub_topic)
    self.logger.debug('Topic %s already exists', pubsub_topic)
  except NotFoundException:
    self.logger.debug('Creating topic %s', pubsub_topic)
    pubsub_api.CreateTopic(topic_name=pubsub_topic)
    self.logger.info('Created Cloud Pub/Sub topic %s', pubsub_topic)

  # Verify that the service account is in the IAM policy.
  policy = pubsub_api.GetTopicIamPolicy(topic_name=pubsub_topic)
  binding = Binding(role='roles/pubsub.publisher',
                    members=['serviceAccount:%s' % service_account])

  # This could be more extensive. We could, for instance, check for roles
  # that are stronger than pubsub.publisher, like owner. We could also
  # recurse up the hierarchy looking to see if there are project-level
  # permissions. This can get very complex very quickly, as the caller
  # may not necessarily have access to the project-level IAM policy.
  # There's no danger in double-granting permission just to make sure it's
  # there, though.
  if binding not in policy.bindings:
    policy.bindings.append(binding)
    # transactional safety via etag field.
    pubsub_api.SetTopicIamPolicy(topic_name=pubsub_topic, policy=policy)
    return True
  else:
    self.logger.debug('GCS already has publish permission to topic %s.',
                      pubsub_topic)
    return False
| 645 | 689 |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import getopt
import re
import time
import uuid
from datetime import datetime
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PublishPermissionDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.project_id import PopulateProjectId
from gslib.pubsub_api import PubsubApi
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.pubsub_apitools.pubsub_v1_messages import Binding
from gslib.utils import copy_helper
# Cloud Pub/Sub commands
_LIST_SYNOPSIS = """
gsutil notification list bucket_url...
"""
_DELETE_SYNOPSIS = """
gsutil notification delete (notificationConfigName|bucket_url)...
"""
_CREATE_SYNOPSIS = """
gsutil notification create -f (json|none) [-p prefix] [-t topic] \\
[-m key:value]... [-e eventType]... bucket_url
"""
# Object Change Notification commands
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = (
_CREATE_SYNOPSIS +
_DELETE_SYNOPSIS.lstrip('\n') +
_LIST_SYNOPSIS.lstrip('\n') +
_WATCHBUCKET_SYNOPSIS +
_STOPCHANNEL_SYNOPSIS.lstrip('\n') + '\n') # yapf: disable
_LIST_DESCRIPTION = """
<B>LIST</B>
The list sub-command provides a list of notification configs belonging to a
given bucket. The listed name of each notification config can be used with
the delete sub-command to delete that specific notification config.
For listing Object Change Notifications instead of Cloud Pub/Sub notification
subscription configs, add a -o flag.
<B>LIST EXAMPLES</B>
Fetch the list of notification configs for the bucket example-bucket:
gsutil notification list gs://example-bucket
The same as above, but for Object Change Notifications instead of Cloud
Pub/Sub notification subscription configs:
gsutil notification list -o gs://example-bucket
Fetch the notification configs in all buckets matching a wildcard:
gsutil notification list gs://example-*
Fetch all of the notification configs for buckets in the default project:
gsutil notification list gs://*
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The delete sub-command deletes notification configs from a bucket. If a
notification config name is passed as a parameter, that notification config
alone will be deleted. If a bucket name is passed, all notification configs
associated with that bucket will be deleted.
Cloud Pub/Sub topics associated with this notification config will not be
deleted by this command. Those must be deleted separately, for example with
the gcloud command `gcloud beta pubsub topics delete`.
Object Change Notification subscriptions cannot be deleted with this command.
For that, see the command `gsutil notification stopchannel`.
<B>DELETE EXAMPLES</B>
Delete a single notification config (with ID 3) in the bucket example-bucket:
gsutil notification delete projects/_/buckets/example-bucket/notificationConfigs/3
Delete all notification configs in the bucket example-bucket:
gsutil notification delete gs://example-bucket
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The create sub-command creates a notification config on a bucket, establishing
a flow of event notifications from Cloud Storage to a Cloud Pub/Sub topic. As
part of creating this flow, the create command also verifies that the
destination Cloud Pub/Sub topic exists, creating it if necessary, and verifies
that the Cloud Storage bucket has permission to publish events to that topic,
granting the permission if necessary.
If a destination Cloud Pub/Sub topic is not specified with the -t flag, Cloud
Storage will by default choose a topic name in the default project whose ID is
the same as the bucket name. For example, if the default project ID specified is
'default-project' and the bucket being configured is gs://example-bucket, the
create command will use the Cloud Pub/Sub topic
"projects/default-project/topics/example-bucket".
In order to enable notifications, a `special Cloud Storage service account
<https://cloud.google.com/storage/docs/projects#service-accounts>`_ unique to
each project must have the IAM permission "projects.topics.publish". This
command will check to see if that permission exists and, if not, will attempt
to grant it.
You can create multiple notification configurations for a bucket, but their
triggers cannot overlap such that a single event could send multiple
notifications. Attempting to create a notification configuration that
overlaps with an existing notification configuration results in an error.
<B>CREATE EXAMPLES</B>
Begin sending notifications of all changes to the bucket example-bucket
to the Cloud Pub/Sub topic projects/default-project/topics/example-bucket:
gsutil notification create -f json gs://example-bucket
The same as above, but specifies the destination topic ID 'files-to-process'
in the default project:
gsutil notification create -f json \\
-t files-to-process gs://example-bucket
The same as above, but specifies a Cloud Pub/Sub topic belonging to the
specific cloud project 'example-project':
gsutil notification create -f json \\
-t projects/example-project/topics/files-to-process gs://example-bucket
Create a notification config that will only send an event when a new object
has been created:
gsutil notification create -f json -e OBJECT_FINALIZE gs://example-bucket
Create a topic and notification config that will only send an event when
an object beginning with "photos/" is affected:
gsutil notification create -p photos/ gs://example-bucket
List all of the notificationConfigs in bucket example-bucket:
gsutil notification list gs://example-bucket
Delete all notificationConfigs for bucket example-bucket:
gsutil notification delete gs://example-bucket
Delete one specific notificationConfig for bucket example-bucket:
gsutil notification delete \\
projects/_/buckets/example-bucket/notificationConfigs/1
<B>OPTIONS</B>
The create sub-command has the following options
-e Specify an event type filter for this notification config. Cloud
Storage will only send notifications of this type. You may specify
this parameter multiple times to allow multiple event types. If not
specified, Cloud Storage will send notifications for all event
types. The valid types are:
OBJECT_FINALIZE - An object has been created.
OBJECT_METADATA_UPDATE - The metadata of an object has changed.
OBJECT_DELETE - An object has been permanently deleted.
OBJECT_ARCHIVE - A live Cloud Storage object has been archived.
-f Specifies the payload format of notification messages. Must be
either "json" for a payload matches the object metadata for the
JSON API, or "none" to specify no payload at all. In either case,
notification details are available in the message attributes.
-m Specifies a key:value attribute that will be appended to the set
of attributes sent to Cloud Pub/Sub for all events associated with
this notification config. You may specify this parameter multiple
times to set multiple attributes.
-p Specifies a prefix path filter for this notification config. Cloud
Storage will only send notifications for objects in this bucket
whose names begin with the specified prefix.
-s Skips creation and permission assignment of the Cloud Pub/Sub topic.
This is useful if the caller does not have permission to access
the topic in question, or if the topic already exists and has the
appropriate publish permission assigned.
-t The Cloud Pub/Sub topic to which notifications should be sent. If
not specified, this command will choose a topic whose project is
your default project and whose ID is the same as the Cloud Storage
bucket name.
<B>NEXT STEPS</B>
Once the create command has succeeded, Cloud Storage will publish a message to
the specified Cloud Pub/Sub topic when eligible changes occur. In order to
receive these messages, you must create a Pub/Sub subscription for your
Pub/Sub topic. To learn more about creating Pub/Sub subscriptions, see `the
Pub/Sub Subscriber Overview <https://cloud.google.com/pubsub/docs/subscriber>`_.
You can create a simple Pub/Sub subscription using the ``gcloud`` command-line
tool. For example, to create a new subscription on the topic "myNewTopic" and
attempt to pull messages from it, you could run:
gcloud beta pubsub subscriptions create --topic myNewTopic testSubscription
gcloud beta pubsub subscriptions pull --auto-ack testSubscription
"""
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See `Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
<B>WATCHBUCKET EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
<B>STOPCHANNEL EXAMPLES</B>
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
"""
_DESCRIPTION = """
The notification command is used to configure Google Cloud Storage support for
sending notifications to Cloud Pub/Sub as well as to configure the object
change notification feature.
<B>CLOUD PUB/SUB</B>
The "create", "list", and "delete" sub-commands deal with configuring Cloud
Storage integration with Google Cloud Pub/Sub.
""" + _CREATE_DESCRIPTION + _LIST_DESCRIPTION + _DELETE_DESCRIPTION + """
<B>OBJECT CHANGE NOTIFICATIONS</B>
For more information on the Object Change Notification feature, please see
`the Object Change Notification docs
<https://cloud.google.com/storage/docs/object-change-notification>`_.
The "watchbucket" and "stopchannel" sub-commands enable and disable Object
Change Notifications.
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file. Alternately, your subscriber code can filter out gsutil's
parallel composite uploads by ignoring any notification about objects whose
names contain (but do not start with) the following string:
"{composite_namespace}".
""".format(composite_namespace=copy_helper.PARALLEL_UPLOAD_TEMP_NAMESPACE)
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see
`Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_.
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
# yapf: disable
_create_help_text = (
CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION))
_list_help_text = (
CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION))
_delete_help_text = (
CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION))
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
# yapf: enable
PAYLOAD_FORMAT_MAP = {
'none': 'NONE',
'json': 'JSON_API_V1',
}
class NotificationCommand(Command):
"""Implementation of gsutil notification command."""
# Notification names might look like one of these:
#   canonical form: projects/_/buckets/bucket/notificationConfigs/3
#   JSON API form:  b/bucket/notificationConfigs/5
# Either of the above might start with a / if a user is copying & pasting.
def _GetNotificationPathRegex(self):
  # Lazily compile and cache the pattern on the class so all instances
  # share a single compiled regex.
  if not NotificationCommand._notification_path_regex:
    NotificationCommand._notification_path_regex = re.compile(
        ('/?(projects/[^/]+/)?b(uckets)?/(?P<bucket>[^/]+)/'
         'notificationConfigs/(?P<notification>[0-9]+)'))
  return NotificationCommand._notification_path_regex

# Class-level cache for the compiled notification-path regex (see above).
_notification_path_regex = None
# Command specification. See base class for documentation.
# NOTE: supported_sub_args previously listed 't:' twice
# ('i:t:m:t:of:e:p:s'); the duplicate was harmless to getopt but
# misleading, so it has been removed. Behavior is unchanged.
command_spec = Command.CreateCommandSpec(
    'notification',
    command_name_aliases=[
        'notify',
        'notifyconfig',
        'notifications',
        'notif',
    ],
    usage_synopsis=_SYNOPSIS,
    min_args=2,
    max_args=NO_MAX,
    supported_sub_args='i:t:m:of:e:p:s',
    file_url_ok=False,
    provider_url_ok=False,
    urls_start_arg=1,
    gs_api_support=[ApiSelector.JSON],
    gs_default_api=ApiSelector.JSON,
    argparse_arguments={
        'watchbucket': [
            CommandArgument.MakeFreeTextArgument(),
            CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
        ],
        'stopchannel': [],
        'list': [
            CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
        ],
        'delete': [
            # Takes a list of one of the following:
            #   notification: projects/_/buckets/bla/notificationConfigs/5,
            #   bucket: gs://foobar
            CommandArgument.MakeZeroOrMoreCloudURLsArgument(),
        ],
        'create': [
            CommandArgument.MakeFreeTextArgument(),  # Cloud Pub/Sub topic
            CommandArgument.MakeNCloudBucketURLsArgument(1),
        ]
    },
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
    help_name='notification',
    help_name_aliases=[
        'watchbucket',
        'stopchannel',
        'notifyconfig',
    ],
    help_type='command_help',
    help_one_line_summary='Configure object change notification',
    help_text=_DETAILED_HELP_TEXT,
    # Per-subcommand help shown by `gsutil help notification <subcommand>`.
    subcommand_help_text={
        'create': _create_help_text,
        'list': _list_help_text,
        'delete': _delete_help_text,
        'watchbucket': _watchbucket_help_text,
        'stopchannel': _stopchannel_help_text,
    },
)
def _WatchBucket(self):
  """Creates a watch on a bucket given in self.args.

  Returns:
    0 on success.

  Raises:
    CommandException: if the app URL is not https:// or the target is not
      a gs:// bucket URL.
  """
  self.CheckArguments()
  identifier = None
  client_token = None
  if self.sub_opts:
    for o, a in self.sub_opts:
      if o == '-i':
        identifier = a
      if o == '-t':
        client_token = a
  # Fall back to a random UUID when the user did not supply -i.
  identifier = identifier or str(uuid.uuid4())
  watch_url = self.args[0]
  bucket_arg = self.args[-1]
  if not watch_url.lower().startswith('https://'):
    raise CommandException('The application URL must be an https:// URL.')
  bucket_url = StorageUrlFromString(bucket_arg)
  if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
    raise CommandException(
        'The %s command can only be used with gs:// bucket URLs.' %
        self.command_name)
  # NOTE: a second bare `if not bucket_url.IsBucket()` check existed here;
  # it was unreachable (the combined check above already raises) and has
  # been removed.
  self.logger.info('Watching bucket %s with application URL %s ...',
                   bucket_url, watch_url)
  try:
    channel = self.gsutil_api.WatchBucket(bucket_url.bucket_name,
                                          watch_url,
                                          identifier,
                                          token=client_token,
                                          provider=bucket_url.scheme)
  except AccessDeniedException as e:
    # Most common failure: the endpoint domain is not whitelisted/verified.
    self.logger.warn(
        NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(watch_error=str(e),
                                                         watch_url=watch_url))
    raise
  channel_id = channel.id
  resource_id = channel.resourceId
  client_token = channel.token
  self.logger.info('Successfully created watch notification channel.')
  self.logger.info('Watch channel identifier: %s', channel_id)
  self.logger.info('Canonicalized resource identifier: %s', resource_id)
  self.logger.info('Client state token: %s', client_token)
  return 0
def _StopChannel(self):
channel_id = self.args[0]
resource_id = self.args[1]
self.logger.info('Removing channel %s with resource identifier %s ...',
channel_id, resource_id)
self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
self.logger.info('Succesfully removed channel.')
return 0
def _ListChannels(self, bucket_arg):
  """Lists active Object Change Notification channels on a bucket.

  Args:
    bucket_arg: URL string naming a gs:// bucket.

  Returns:
    0 on success.

  Raises:
    CommandException: if the URL is not a gs:// bucket URL.
  """
  bucket_url = StorageUrlFromString(bucket_arg)
  if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
    raise CommandException(
        'The %s command can only be used with gs:// bucket URLs.' %
        self.command_name)
  # NOTE: a second bare `if not bucket_url.IsBucket()` check existed here;
  # it was unreachable (the combined check above already raises) and has
  # been removed.
  channels = self.gsutil_api.ListChannels(bucket_url.bucket_name,
                                          provider='gs').items
  self.logger.info(
      'Bucket %s has the following active Object Change Notifications:',
      bucket_url.bucket_name)
  for idx, channel in enumerate(channels):
    self.logger.info('\tNotification channel %d:', idx + 1)
    self.logger.info('\t\tChannel identifier: %s', channel.channel_id)
    self.logger.info('\t\tResource identifier: %s', channel.resource_id)
    self.logger.info('\t\tApplication URL: %s', channel.push_url)
    self.logger.info('\t\tCreated by: %s', channel.subscriber_email)
    self.logger.info(
        '\t\tCreation time: %s',
        # creation_time_ms is epoch milliseconds; convert to seconds.
        str(datetime.fromtimestamp(channel.creation_time_ms / 1000)))
  return 0
def _Create(self):
  """Creates a notification config wiring a bucket to a Cloud Pub/Sub topic.

  Parses the create suboptions, optionally creates the destination topic
  and grants GCS publish permission on it, then creates the notification
  config on the bucket (retrying once if freshly granted IAM permissions
  have not yet propagated).

  Returns:
    0 on success.

  Raises:
    CommandException: on invalid arguments.
  """
  self.CheckArguments()

  # User-specified options
  pubsub_topic = None
  payload_format = None
  custom_attributes = {}
  event_types = []
  object_name_prefix = None
  should_setup_topic = True

  if self.sub_opts:
    for o, a in self.sub_opts:
      if o == '-e':
        event_types.append(a)
      elif o == '-f':
        payload_format = a
      elif o == '-m':
        if ':' not in a:
          raise CommandException(
              'Custom attributes specified with -m should be of the form '
              'key:value')
        # BUG FIX: split only on the first ':' so attribute values may
        # themselves contain colons (e.g. URLs); the original bare
        # split(':') raised ValueError for such values.
        key, value = a.split(':', 1)
        custom_attributes[key] = value
      elif o == '-p':
        object_name_prefix = a
      elif o == '-s':
        should_setup_topic = False
      elif o == '-t':
        pubsub_topic = a

  if payload_format not in PAYLOAD_FORMAT_MAP:
    raise CommandException(
        "Must provide a payload format with -f of either 'json' or 'none'")
  payload_format = PAYLOAD_FORMAT_MAP[payload_format]

  bucket_arg = self.args[-1]

  bucket_url = StorageUrlFromString(bucket_arg)
  if not bucket_url.IsCloudUrl() or not bucket_url.IsBucket():
    raise CommandException(
        "%s %s requires a GCS bucket name, but got '%s'" %
        (self.command_name, self.subcommand_name, bucket_arg))
  if bucket_url.scheme != 'gs':
    raise CommandException(
        'The %s command can only be used with gs:// bucket URLs.' %
        self.command_name)
  bucket_name = bucket_url.bucket_name
  self.logger.debug('Creating notification for bucket %s', bucket_url)

  # Find the project this bucket belongs to
  bucket_metadata = self.gsutil_api.GetBucket(bucket_name,
                                              fields=['projectNumber'],
                                              provider=bucket_url.scheme)
  bucket_project_number = bucket_metadata.projectNumber

  # If not specified, choose a sensible default for the Cloud Pub/Sub topic
  # name.
  if not pubsub_topic:
    pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
                                              bucket_name)
  if not pubsub_topic.startswith('projects/'):
    # If a user picks a topic ID (mytopic) but doesn't pass the whole name (
    # projects/my-project/topics/mytopic ), pick a default project.
    pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
                                              pubsub_topic)
  self.logger.debug('Using Cloud Pub/Sub topic %s', pubsub_topic)

  just_modified_topic_permissions = False
  if should_setup_topic:
    # Ask GCS for the email address that represents GCS's permission to
    # publish to a Cloud Pub/Sub topic from this project.
    service_account = self.gsutil_api.GetProjectServiceAccount(
        bucket_project_number, provider=bucket_url.scheme).email_address
    self.logger.debug('Service account for project %d: %s',
                      bucket_project_number, service_account)
    just_modified_topic_permissions = self._CreateTopic(
        pubsub_topic, service_account)

  for attempt_number in range(0, 2):
    try:
      create_response = self.gsutil_api.CreateNotificationConfig(
          bucket_name,
          pubsub_topic=pubsub_topic,
          payload_format=payload_format,
          custom_attributes=custom_attributes,
          event_types=event_types if event_types else None,
          object_name_prefix=object_name_prefix,
          provider=bucket_url.scheme)
      break
    except PublishPermissionDeniedException:
      if attempt_number == 0 and just_modified_topic_permissions:
        # If we have just set the IAM policy, it may take up to 10 seconds
        # to take effect.
        self.logger.info(
            'Retrying create notification in 10 seconds '
            '(new permissions may take up to 10 seconds to take effect.)')
        time.sleep(10)
      else:
        raise

  notification_name = 'projects/_/buckets/%s/notificationConfigs/%s' % (
      bucket_name, create_response.id)
  self.logger.info('Created notification config %s', notification_name)

  return 0
def _CreateTopic(self, pubsub_topic, service_account):
  """Assures that a topic exists, creating it if necessary.

  Also adds GCS as a publisher on that bucket, if necessary.

  Args:
    pubsub_topic: name of the Cloud Pub/Sub topic to use/create.
    service_account: the GCS service account that needs publish permission.

  Returns:
    true if we modified IAM permissions, otherwise false.
  """
  pubsub_api = PubsubApi(logger=self.logger)

  # Verify that the Pub/Sub topic exists. If it does not, create it.
  try:
    pubsub_api.GetTopic(topic_name=pubsub_topic)
    self.logger.debug('Topic %s already exists', pubsub_topic)
  except NotFoundException:
    self.logger.debug('Creating topic %s', pubsub_topic)
    pubsub_api.CreateTopic(topic_name=pubsub_topic)
    self.logger.info('Created Cloud Pub/Sub topic %s', pubsub_topic)

  # Verify that the service account is in the IAM policy.
  policy = pubsub_api.GetTopicIamPolicy(topic_name=pubsub_topic)
  binding = Binding(role='roles/pubsub.publisher',
                    members=['serviceAccount:%s' % service_account])

  # This could be more extensive. We could, for instance, check for roles
  # that are stronger than pubsub.publisher, like owner. We could also
  # recurse up the hierarchy looking to see if there are project-level
  # permissions. This can get very complex very quickly, as the caller
  # may not necessarily have access to the project-level IAM policy.
  # There's no danger in double-granting permission just to make sure it's
  # there, though.
  if binding not in policy.bindings:
    policy.bindings.append(binding)
    # transactional safety via etag field.
    pubsub_api.SetTopicIamPolicy(topic_name=pubsub_topic, policy=policy)
    return True
  else:
    self.logger.debug('GCS already has publish permission to topic %s.',
                      pubsub_topic)
    return False
def _EnumerateNotificationsFromArgs(self, accept_notification_configs=True):
"""Yields bucket/notification tuples from command-line args.
Given a list of strings that are bucket names (gs://foo) or notification
config IDs, yield tuples of bucket names and their associated notifications.
Args:
accept_notification_configs: whether notification configs are valid args.
Yields:
Tuples of the form (bucket_name, Notification)
"""
path_regex = self._GetNotificationPathRegex()
for list_entry in self.args:
match = path_regex.match(list_entry)
if match:
if not accept_notification_configs:
raise CommandException(
'%s %s accepts only bucket names, but you provided %s' %
(self.command_name, self.subcommand_name, list_entry))
bucket_name = match.group('bucket')
notification_id = match.group('notification')
found = False
for notification in self.gsutil_api.ListNotificationConfigs(
bucket_name, provider='gs'):
if notification.id == notification_id:
yield (bucket_name, notification)
found = True
break
if not found:
raise NotFoundException('Could not find notification %s' % list_entry)
else:
storage_url = StorageUrlFromString(list_entry)
if not storage_url.IsCloudUrl():
raise CommandException(
'The %s command must be used on cloud buckets or notification '
'config names.' % self.command_name)
if storage_url.scheme != 'gs':
raise CommandException('The %s command only works on gs:// buckets.')
path = None
if storage_url.IsProvider():
path = 'gs://*'
elif storage_url.IsBucket():
path = list_entry
if not path:
raise CommandException(
'The %s command cannot be used on cloud objects, only buckets' %
self.command_name)
for blr in self.WildcardIterator(path).IterBuckets(
bucket_fields=['id']):
for notification in self.gsutil_api.ListNotificationConfigs(
blr.storage_url.bucket_name, provider='gs'):
yield (blr.storage_url.bucket_name, notification)
def _List(self):
self.CheckArguments()
if self.sub_opts:
if '-o' in dict(self.sub_opts):
for bucket_name in self.args:
self._ListChannels(bucket_name)
else:
for bucket_name, notification in self._EnumerateNotificationsFromArgs(
accept_notification_configs=False):
self._PrintNotificationDetails(bucket_name, notification)
return 0
def _PrintNotificationDetails(self, bucket, notification):
print('projects/_/buckets/{bucket}/notificationConfigs/{notification}\n'
'\tCloud Pub/Sub topic: {topic}'.format(
bucket=bucket,
notification=notification.id,
topic=notification.topic[len('//pubsub.googleapis.com/'):]))
if notification.custom_attributes:
print('\tCustom attributes:')
for attr in notification.custom_attributes.additionalProperties:
print('\t\t%s: %s' % (attr.key, attr.value))
filters = []
if notification.event_types:
filters.append('\t\tEvent Types: %s' %
', '.join(notification.event_types))
if notification.object_name_prefix:
filters.append("\t\tObject name prefix: '%s'" %
notification.object_name_prefix)
if filters:
print('\tFilters:')
for line in filters:
print(line)
self.logger.info('')
def _Delete(self):
for bucket_name, notification in self._EnumerateNotificationsFromArgs():
self._DeleteNotification(bucket_name, notification.id)
return 0
def _DeleteNotification(self, bucket_name, notification_id):
self.gsutil_api.DeleteNotificationConfig(bucket_name,
notification=notification_id,
provider='gs')
return 0
def _RunSubCommand(self, func):
  """Parses suboptions, logs them, then dispatches to a subcommand handler.

  Args:
    func: Unbound subcommand method (from SUBCOMMANDS) to invoke with self.

  Returns:
    The subcommand's return code.
  """
  try:
    self.sub_opts, self.args = getopt.getopt(
        self.args, self.command_spec.supported_sub_args)
    # Commands with both suboptions and subcommands need to reparse for
    # suboptions, so we log again.
    metrics.LogCommandParams(sub_opts=self.sub_opts)
    return func(self)
  except getopt.GetoptError:
    self.RaiseInvalidArgumentException()
# Dispatch table mapping each subcommand name to the (unbound) method that
# implements it; RunCommand looks the handler up here and invokes it
# through _RunSubCommand, which parses suboptions first.
SUBCOMMANDS = {
    'create': _Create,
    'list': _List,
    'delete': _Delete,
    'watchbucket': _WatchBucket,
    'stopchannel': _StopChannel
}
def RunCommand(self):
  """Command entry point for the notification command."""
  self.subcommand_name = self.args.pop(0)
  handler = NotificationCommand.SUBCOMMANDS.get(self.subcommand_name)
  if handler is None:
    raise CommandException('Invalid subcommand "%s" for the %s command.' %
                           (self.subcommand_name, self.command_name))
  metrics.LogCommandParams(subcommands=[self.subcommand_name])
  return self._RunSubCommand(handler)
|
plot_angle
|
Plot angle.
Args:
ax: matplotlib ax.
z_coords (list): List of z coordinate of each plane.
label (str): Plot label.
decorate (bool): If True, ax is decorated.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Twinboundary plot
This module provide various kinds of plot related to twin boudnary.
"""
import numpy as np
from copy import deepcopy
from twinpy.plot.base import line_chart
def plot_plane(ax,
               distances:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True,
               show_half:bool=False,
               **kwargs):
    """
    Plot plane interval against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        distances (list): List of plane intervals.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
        show_half: If True, atom planes which are periodically equivalent are
                   not shown.
        **kwargs: Extra keyword arguments forwarded to line_chart.
    """
    if decorate:
        # NOTE(review): 'Hight' looks like a typo for 'Height' in the axis
        # label; left unchanged here because it is a runtime string.
        xlabel = 'Distance'
        ylabel = 'Hight'
    else:
        xlabel = ylabel = None
    # Pad both ends with the periodic images so the curve closes over one
    # full period along z.
    _distances = deepcopy(distances)
    _z_coords = deepcopy(z_coords)
    _distances.insert(0, distances[-1])
    _distances.append(distances[0])
    _z_coords.insert(0, -distances[-1])
    _z_coords.append(z_coords[-1]+distances[0])
    c = np.sum(distances)  # total period length along z
    # Shift heights so the cell midpoint sits at z = 0. NOTE(review): the
    # list + scalar expression relies on numpy broadcasting (np.sum returns
    # a numpy scalar) — confirm inputs stay numpy-backed.
    fixed_z_coords = _z_coords + distances[0] / 2 - c / 2
    num = len(fixed_z_coords)
    # Reference interval taken from a bulk-like plane away from the boundary.
    bulk_distance = _distances[int(num/4)]
    if show_half:
        # Keep only the central half; the rest is periodically equivalent.
        n = int((num + 2) / 4)
        _distances = _distances[n:3*n]
        fixed_z_coords = fixed_z_coords[n:3*n]
    line_chart(ax=ax,
               xdata=_distances,
               ydata=fixed_z_coords,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y',
               **kwargs)
    if decorate:
        xmin = bulk_distance - 0.025
        xmax = bulk_distance + 0.025
        if show_half:
            # Mark the single remaining boundary plane at z = 0.
            ax.hlines(0,
                      xmin=xmin-0.01,
                      xmax=xmax+0.01,
                      linestyle='--',
                      color='k',
                      linewidth=1.)
        else:
            # Dashed guides at the twin-boundary planes (both boundaries and
            # the periodic image at the cell edge).
            tb_idx = [1, int(num/2), num-1]
            for idx in tb_idx:
                ax.hlines(fixed_z_coords[idx]-distances[0]/2,
                          xmin=xmin-0.01,
                          xmax=xmax+0.01,
                          linestyle='--',
                          color='k',
                          linewidth=1.)
# MASKED: plot_angle function (lines 86-128)
def plot_pair_distance(ax,
                       pair_distances:list,
                       z_coords:list,
                       label:str=None,
                       decorate:bool=True):
    """
    Plot A-B pair distance against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        pair_distances (list): List of A-B pair distances, which is originally
                               primitive pair in HCP structure.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Pair Distance' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_pairs = deepcopy(pair_distances)
    periodic_z = deepcopy(z_coords)
    periodic_pairs.append(pair_distances[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_pairs,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_pair_distance = pair_distances[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_pair_distance + 2,
                      linestyle='--',
                      linewidth=1.5)
|
def plot_angle(ax,
               angles:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True):
    """
    Plot angle against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        angles (list): List of angles of each plane.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Angle' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_angles = deepcopy(angles)
    periodic_z = deepcopy(z_coords)
    periodic_angles.append(angles[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_angles,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_angle = angles[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_angle + 2,
                      linestyle='--',
                      linewidth=1.5)
| 86 | 128 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Twinboundary plot
This module provides various kinds of plots related to twin boundary.
"""
import numpy as np
from copy import deepcopy
from twinpy.plot.base import line_chart
def plot_plane(ax,
               distances:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True,
               show_half:bool=False,
               **kwargs):
    """
    Plot plane interval against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        distances (list): List of plane intervals.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
        show_half: If True, atom planes which are periodically equivalent are
                   not shown.
        **kwargs: Extra keyword arguments forwarded to line_chart.
    """
    if decorate:
        # NOTE(review): 'Hight' looks like a typo for 'Height' in the axis
        # label; left unchanged here because it is a runtime string.
        xlabel = 'Distance'
        ylabel = 'Hight'
    else:
        xlabel = ylabel = None
    # Pad both ends with the periodic images so the curve closes over one
    # full period along z.
    _distances = deepcopy(distances)
    _z_coords = deepcopy(z_coords)
    _distances.insert(0, distances[-1])
    _distances.append(distances[0])
    _z_coords.insert(0, -distances[-1])
    _z_coords.append(z_coords[-1]+distances[0])
    c = np.sum(distances)  # total period length along z
    # Shift heights so the cell midpoint sits at z = 0. NOTE(review): the
    # list + scalar expression relies on numpy broadcasting (np.sum returns
    # a numpy scalar) — confirm inputs stay numpy-backed.
    fixed_z_coords = _z_coords + distances[0] / 2 - c / 2
    num = len(fixed_z_coords)
    # Reference interval taken from a bulk-like plane away from the boundary.
    bulk_distance = _distances[int(num/4)]
    if show_half:
        # Keep only the central half; the rest is periodically equivalent.
        n = int((num + 2) / 4)
        _distances = _distances[n:3*n]
        fixed_z_coords = fixed_z_coords[n:3*n]
    line_chart(ax=ax,
               xdata=_distances,
               ydata=fixed_z_coords,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y',
               **kwargs)
    if decorate:
        xmin = bulk_distance - 0.025
        xmax = bulk_distance + 0.025
        if show_half:
            # Mark the single remaining boundary plane at z = 0.
            ax.hlines(0,
                      xmin=xmin-0.01,
                      xmax=xmax+0.01,
                      linestyle='--',
                      color='k',
                      linewidth=1.)
        else:
            # Dashed guides at the twin-boundary planes (both boundaries and
            # the periodic image at the cell edge).
            tb_idx = [1, int(num/2), num-1]
            for idx in tb_idx:
                ax.hlines(fixed_z_coords[idx]-distances[0]/2,
                          xmin=xmin-0.01,
                          xmax=xmax+0.01,
                          linestyle='--',
                          color='k',
                          linewidth=1.)
def plot_angle(ax,
               angles:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True):
    """
    Plot angle against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        angles (list): List of angles of each plane.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Angle' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_angles = deepcopy(angles)
    periodic_z = deepcopy(z_coords)
    periodic_angles.append(angles[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_angles,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_angle = angles[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_angle + 2,
                      linestyle='--',
                      linewidth=1.5)
def plot_pair_distance(ax,
                       pair_distances:list,
                       z_coords:list,
                       label:str=None,
                       decorate:bool=True):
    """
    Plot A-B pair distance against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        pair_distances (list): List of A-B pair distances, which is originally
                               primitive pair in HCP structure.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Pair Distance' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_pairs = deepcopy(pair_distances)
    periodic_z = deepcopy(z_coords)
    periodic_pairs.append(pair_distances[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_pairs,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_pair_distance = pair_distances[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_pair_distance + 2,
                      linestyle='--',
                      linewidth=1.5)
|
plot_pair_distance
|
Plot angle.
Args:
ax: matplotlib ax.
pair_distances (list): List of A-B pair distances, which is originally
primitive pair in HCP structure.
z_coords (list): List of z coordinate of each plane.
label (str): Plot label.
decorate (bool): If True, ax is decorated.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Twinboundary plot
This module provides various kinds of plots related to twin boundary.
"""
import numpy as np
from copy import deepcopy
from twinpy.plot.base import line_chart
def plot_plane(ax,
               distances:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True,
               show_half:bool=False,
               **kwargs):
    """
    Plot plane interval against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        distances (list): List of plane intervals.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
        show_half: If True, atom planes which are periodically equivalent are
                   not shown.
        **kwargs: Extra keyword arguments forwarded to line_chart.
    """
    if decorate:
        # NOTE(review): 'Hight' looks like a typo for 'Height' in the axis
        # label; left unchanged here because it is a runtime string.
        xlabel = 'Distance'
        ylabel = 'Hight'
    else:
        xlabel = ylabel = None
    # Pad both ends with the periodic images so the curve closes over one
    # full period along z.
    _distances = deepcopy(distances)
    _z_coords = deepcopy(z_coords)
    _distances.insert(0, distances[-1])
    _distances.append(distances[0])
    _z_coords.insert(0, -distances[-1])
    _z_coords.append(z_coords[-1]+distances[0])
    c = np.sum(distances)  # total period length along z
    # Shift heights so the cell midpoint sits at z = 0. NOTE(review): the
    # list + scalar expression relies on numpy broadcasting (np.sum returns
    # a numpy scalar) — confirm inputs stay numpy-backed.
    fixed_z_coords = _z_coords + distances[0] / 2 - c / 2
    num = len(fixed_z_coords)
    # Reference interval taken from a bulk-like plane away from the boundary.
    bulk_distance = _distances[int(num/4)]
    if show_half:
        # Keep only the central half; the rest is periodically equivalent.
        n = int((num + 2) / 4)
        _distances = _distances[n:3*n]
        fixed_z_coords = fixed_z_coords[n:3*n]
    line_chart(ax=ax,
               xdata=_distances,
               ydata=fixed_z_coords,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y',
               **kwargs)
    if decorate:
        xmin = bulk_distance - 0.025
        xmax = bulk_distance + 0.025
        if show_half:
            # Mark the single remaining boundary plane at z = 0.
            ax.hlines(0,
                      xmin=xmin-0.01,
                      xmax=xmax+0.01,
                      linestyle='--',
                      color='k',
                      linewidth=1.)
        else:
            # Dashed guides at the twin-boundary planes (both boundaries and
            # the periodic image at the cell edge).
            tb_idx = [1, int(num/2), num-1]
            for idx in tb_idx:
                ax.hlines(fixed_z_coords[idx]-distances[0]/2,
                          xmin=xmin-0.01,
                          xmax=xmax+0.01,
                          linestyle='--',
                          color='k',
                          linewidth=1.)
def plot_angle(ax,
               angles:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True):
    """
    Plot angle against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        angles (list): List of angles of each plane.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Angle' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_angles = deepcopy(angles)
    periodic_z = deepcopy(z_coords)
    periodic_angles.append(angles[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_angles,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_angle = angles[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_angle + 2,
                      linestyle='--',
                      linewidth=1.5)
# MASKED: plot_pair_distance function (lines 131-175)
|
def plot_pair_distance(ax,
                       pair_distances:list,
                       z_coords:list,
                       label:str=None,
                       decorate:bool=True):
    """
    Plot A-B pair distance against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        pair_distances (list): List of A-B pair distances, which is originally
                               primitive pair in HCP structure.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Pair Distance' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_pairs = deepcopy(pair_distances)
    periodic_z = deepcopy(z_coords)
    periodic_pairs.append(pair_distances[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_pairs,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_pair_distance = pair_distances[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_pair_distance + 2,
                      linestyle='--',
                      linewidth=1.5)
| 131 | 175 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Twinboundary plot
This module provides various kinds of plots related to twin boundary.
"""
import numpy as np
from copy import deepcopy
from twinpy.plot.base import line_chart
def plot_plane(ax,
               distances:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True,
               show_half:bool=False,
               **kwargs):
    """
    Plot plane interval against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        distances (list): List of plane intervals.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
        show_half: If True, atom planes which are periodically equivalent are
                   not shown.
        **kwargs: Extra keyword arguments forwarded to line_chart.
    """
    if decorate:
        # NOTE(review): 'Hight' looks like a typo for 'Height' in the axis
        # label; left unchanged here because it is a runtime string.
        xlabel = 'Distance'
        ylabel = 'Hight'
    else:
        xlabel = ylabel = None
    # Pad both ends with the periodic images so the curve closes over one
    # full period along z.
    _distances = deepcopy(distances)
    _z_coords = deepcopy(z_coords)
    _distances.insert(0, distances[-1])
    _distances.append(distances[0])
    _z_coords.insert(0, -distances[-1])
    _z_coords.append(z_coords[-1]+distances[0])
    c = np.sum(distances)  # total period length along z
    # Shift heights so the cell midpoint sits at z = 0. NOTE(review): the
    # list + scalar expression relies on numpy broadcasting (np.sum returns
    # a numpy scalar) — confirm inputs stay numpy-backed.
    fixed_z_coords = _z_coords + distances[0] / 2 - c / 2
    num = len(fixed_z_coords)
    # Reference interval taken from a bulk-like plane away from the boundary.
    bulk_distance = _distances[int(num/4)]
    if show_half:
        # Keep only the central half; the rest is periodically equivalent.
        n = int((num + 2) / 4)
        _distances = _distances[n:3*n]
        fixed_z_coords = fixed_z_coords[n:3*n]
    line_chart(ax=ax,
               xdata=_distances,
               ydata=fixed_z_coords,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y',
               **kwargs)
    if decorate:
        xmin = bulk_distance - 0.025
        xmax = bulk_distance + 0.025
        if show_half:
            # Mark the single remaining boundary plane at z = 0.
            ax.hlines(0,
                      xmin=xmin-0.01,
                      xmax=xmax+0.01,
                      linestyle='--',
                      color='k',
                      linewidth=1.)
        else:
            # Dashed guides at the twin-boundary planes (both boundaries and
            # the periodic image at the cell edge).
            tb_idx = [1, int(num/2), num-1]
            for idx in tb_idx:
                ax.hlines(fixed_z_coords[idx]-distances[0]/2,
                          xmin=xmin-0.01,
                          xmax=xmax+0.01,
                          linestyle='--',
                          color='k',
                          linewidth=1.)
def plot_angle(ax,
               angles:list,
               z_coords:list,
               label:str=None,
               decorate:bool=True):
    """
    Plot angle against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        angles (list): List of angles of each plane.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Angle' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_angles = deepcopy(angles)
    periodic_z = deepcopy(z_coords)
    periodic_angles.append(angles[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_angles,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_angle = angles[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_angle + 2,
                      linestyle='--',
                      linewidth=1.5)
def plot_pair_distance(ax,
                       pair_distances:list,
                       z_coords:list,
                       label:str=None,
                       decorate:bool=True):
    """
    Plot A-B pair distance against the height of each atom plane.

    Args:
        ax: matplotlib ax.
        pair_distances (list): List of A-B pair distances, which is originally
                               primitive pair in HCP structure.
        z_coords (list): List of z coordinate of each plane.
        label (str): Plot label.
        decorate (bool): If True, ax is decorated.
    """
    xlabel = 'Pair Distance' if decorate else None
    ylabel = 'Hight' if decorate else None
    # Repeat the first value at the top of the cell so the plotted curve
    # closes over one period.
    periodic_pairs = deepcopy(pair_distances)
    periodic_z = deepcopy(z_coords)
    periodic_pairs.append(pair_distances[0])
    periodic_z.append(z_coords[-1] + z_coords[1])
    line_chart(ax=ax,
               xdata=periodic_pairs,
               ydata=periodic_z,
               xlabel=xlabel,
               ylabel=ylabel,
               label=label,
               sort_by='y')
    if decorate:
        num = len(periodic_z)
        bulk_pair_distance = pair_distances[int(num / 4)]
        # Dashed guide lines at the twin-boundary planes.
        for idx in (0, int(num / 2), num - 1):
            ax.hlines(periodic_z[idx],
                      xmin=-1,
                      xmax=bulk_pair_distance + 2,
                      linestyle='--',
                      linewidth=1.5)
|
single_gpu_test
|
Test model with single GPU, used for visualization.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
Returns:
dict: test results
|
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : test.py
# Abstract : The common testing api for video text recognition, track, quality score
# Current Version: 1.0.0
# Date : 2021-06-02
##################################################################################################
"""
import numpy as np
import mmcv
import torch
# MASKED: single_gpu_test function (lines 17-56)
|
def single_gpu_test(model,
                    data_loader):
    """ Test model with single GPU, used for visualization.

    Runs the model over the whole data loader without gradients and collects
    per-sample predictions.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.

    Returns:
        dict: test results with list-valued keys 'texts', 'img_info',
            'scores', and 'glimpses' stacked into a single np.ndarray.
    """
    model.eval()
    results = {'texts': [], 'img_info': [], 'glimpses': [], 'scores': []}
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    # Original used enumerate() but never read the index; iterate directly.
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        texts = result['text']
        glimpses = result['glimpses'].cpu().numpy()
        img_infos = result['img_info']
        # Flatten per-sample quality scores to a 1-D array.
        scores = result['scores'].cpu().numpy().reshape(-1)
        results['texts'].extend(texts)
        results['img_info'].extend(img_infos)
        results['glimpses'].extend(glimpses)
        results['scores'].extend(scores)
        for _ in range(len(texts)):
            prog_bar.update()
    # Stack the per-sample glimpse arrays into one ndarray.
    results['glimpses'] = np.stack(results['glimpses'])
    return results
| 17 | 56 |
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : test.py
# Abstract : The common testing api for video text recognition, track, quality score
# Current Version: 1.0.0
# Date : 2021-06-02
##################################################################################################
"""
import numpy as np
import mmcv
import torch
def single_gpu_test(model,
                    data_loader):
    """ Test model with single GPU, used for visualization.

    Runs the model over the whole data loader without gradients and collects
    per-sample predictions.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.

    Returns:
        dict: test results with list-valued keys 'texts', 'img_info',
            'scores', and 'glimpses' stacked into a single np.ndarray.
    """
    model.eval()
    results = {'texts': [], 'img_info': [], 'glimpses': [], 'scores': []}
    prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
    # Original used enumerate() but never read the index; iterate directly.
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        texts = result['text']
        glimpses = result['glimpses'].cpu().numpy()
        img_infos = result['img_info']
        # Flatten per-sample quality scores to a 1-D array.
        scores = result['scores'].cpu().numpy().reshape(-1)
        results['texts'].extend(texts)
        results['img_info'].extend(img_infos)
        results['glimpses'].extend(glimpses)
        results['scores'].extend(scores)
        for _ in range(len(texts)):
            prog_bar.update()
    # Stack the per-sample glimpse arrays into one ndarray.
    results['glimpses'] = np.stack(results['glimpses'])
    return results
|
__init__
|
Constructor
Args:
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
time windows used, in seconds (default: [[2,5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
|
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
# MASKED: __init__ function (lines 23-93)
def fit(self, samples, labels):
    """ Training

    Fits the Riemannian feature extractor on the samples, then trains the
    SVM classifier on the extracted features.

    Parameters
    ----------
    samples: np.array, size=(N, C, T)
        training samples
    labels: np.array, size=(N)
        training labels
    """
    # extract the number of features
    assert len(samples.shape) == 3
    no_channels = samples.shape[1]
    self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of CSP feature per band and timewindow
    self.no_features = self.no_riem * self.no_bands * self.no_time_windows
    # fit and extract training features from the riemannian
    features = self.riemannian.fit(samples)
    self.classifier.fit(features, labels)
def score(self, samples, labels):
    """ Measure the performance, returns success rate

    Extracts features with the (already fitted) Riemannian pipeline and
    scores the classifier on them.

    Parameters
    ----------
    samples: np.array, size=(N, C, T)
        training samples
    labels: np.array, size=(N)
        training labels

    Returns
    -------
    float: score of the model
    """
    features = self.riemannian.features(samples)
    return self.classifier.score(features, labels)
def predict(self, samples):
    """ Predict some data

    Parameters
    ----------
    samples: np.array, size=(N, C, T)
        training samples

    Returns
    -------
    np.array, size=[N]: prediction
    """
    # extract Riemannian features, then classify with the trained SVM
    features = self.riemannian.features(samples)
    return self.classifier.predict(features)
class QuantizedRiemannianModel():
    """ QuantizedRiemannian Model

    Same pipeline as RiemannianModel (filterbank -> Riemannian features ->
    linear SVM), but with quantized features and classifier weights.
    """
    def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
                 random_state=None, num_bits=8, bitshift_scale=True):
        """ Constructor

        Parameters
        ----------
        svm_c: float
            regularization parameter for the classifier
        fs: int
            sampling rate of the data
        bands: list of int
            bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
        riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
            type of riemannian used
        rho: float
            Normalization parameter for the covariance matrix of the riemannian
        filter_order: int
            Order of the filter
        random_state: int or None
            random seed used in the SVM
        num_bits: int
            Number of bits used for quantization
        bitshift_scale: bool
            if True, make sure that all scale factors between one part and the next is a bitshift
        """
        self.num_bits = num_bits
        self.bitshift_scale = bitshift_scale
        # setup classifier (linear only: quantization targets linear weights)
        self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
        # setup Filterbank
        if bands is None:
            bandwidths = np.array([2, 4, 8, 16, 32])
        else:
            bandwidths = np.array(bands)
        filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
        # setup Time Windows (fixed 2.5 s - 6 s window, converted to samples)
        time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
        # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
        # setup riemannian
        self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
                                                        rho=rho, vectorized=True, num_bits=num_bits,
                                                        bitshift_scale=bitshift_scale)
        # prepare quantized weights and biases (scales are updated during fit)
        self.scale_weight = 0
        self.scale_bias = 0
        # store dimensionality
        self.no_bands = filter_bank.shape[0]
        self.no_time_windows = time_windows.shape[0]
        self.no_riem = None
        self.no_features = None
    def fit(self, samples, labels):
        """ Training

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels
        """
        # extract the number of features
        assert len(samples.shape) == 3
        no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of CSP feature per band and timewindow
        self.no_features = self.no_riem * self.no_bands * self.no_time_windows
        # prepare scale factors
        self.riemannian.prepare_quantization(samples)
        # fit and extract training features from the riemannian
        features = self.riemannian.fit(samples)
        self.classifier.fit(features, labels)
        # quantize the classifier weights in place
        self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
        weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
        self.classifier.coef_ = weights
        # do not quantize the bias, this one will be added in 32 bit, and quantization does not
        # matter here...
        # self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
        # bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
        #                 do_round=True)
        # self.classifier.intercept_ = bias
    def score(self, samples, labels):
        """ Measure the performance, returns success rate

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels

        Returns
        -------
        float: score of the model
        """
        features = self.riemannian.features(samples)
        return self.classifier.score(features, labels)
    def predict(self, samples):
        """ Predict some data

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples

        Returns
        -------
        np.array, size=[N]: prediction
        """
        features = self.riemannian.features(samples)
        return self.classifier.predict(features)
    def predict_with_intermediate(self, sample, verbose=True):
        """ Predict a single trial, keeping every intermediate result

        Parameters
        ----------
        sample: np.array, size=(C, T)
            single trial to predict
        verbose: bool
            if True, print a progress message

        Returns
        -------
        ordered dictionary including every intermediate result and the output
        """
        if verbose:
            print("Predict sample with intermediate matrices")
        assert len(sample.shape) == 2
        result = self.riemannian.onetrial_feature_with_intermediate(sample)
        # the last entry of the ordered dict is the final feature vector
        features = next(reversed(result.values()))
        features = features.reshape(1, -1)
        result["svm_result"] = self.classifier.decision_function(features)
        result["prediction"] = self.classifier.predict(features)
        return result
    def get_data_dict(self):
        """ Returns a nested dictionary containing all necessary data """
        return {"num_bits": self.num_bits,
                "bitshift_scale": self.bitshift_scale,
                "SVM": {"weights": self.classifier.coef_,
                        "weight_scale": self.scale_weight,
                        "bias": self.classifier.intercept_},
                "riemannian": self.riemannian.get_data_dict()}
|
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
             riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
             random_state=None):
    """ Constructor

    Parameters
    ----------
    svm_kernel: str {'linear', 'sigmoid', 'rbf'}
        kernel used for classifier
    svm_c: float
        regularization parameter for the classifier
    fs: int
        sampling rate of the data
    bands: list of int
        bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
    time_windows: list of list of ints, shape = (N, 2)
        time windows used, in seconds (default: [[2.5, 6]])
    riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
        type of riemannian used
    rho: float
        Normalization parameter for the covariance matrix of the riemannian
    filter_type: str {"butter", "fir"}
        Type of the filter
    filter_order: int
        Order of the filter
    random_state: int or None
        random seed used in the SVM
    """
    # setup classifier
    if svm_kernel == 'linear':
        self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
    else:
        self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
                              cache_size=10000, random_state=random_state)
    # setup Filterbank
    if bands is None:
        bandwidths = np.array([2, 4, 8, 16, 32])
    else:
        bandwidths = np.array(bands)
    filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
    # setup Time Windows (seconds -> sample indices)
    if time_windows is None:
        time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
        # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
    else:
        time_windows = (np.array(time_windows) * fs).astype(int)
    # setup riemannian
    self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
                                           rho=rho, vectorized=True)
    # store dimensionality (per-channel feature counts are set in fit)
    self.no_bands = filter_bank.shape[0]
    self.no_time_windows = time_windows.shape[0]
    self.no_riem = None
    self.no_features = None
| 23 | 93 |
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
    """ Riemannian Model

    Pipeline: filterbank -> multiscale Riemannian features -> SVM classifier.
    """
    def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
                 riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
                 random_state=None):
        """ Constructor

        Parameters
        ----------
        svm_kernel: str {'linear', 'sigmoid', 'rbf'}
            kernel used for classifier
        svm_c: float
            regularization parameter for the classifier
        fs: int
            sampling rate of the data
        bands: list of int
            bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
        time_windows: list of list of ints, shape = (N, 2)
            time windows used, in seconds (default: [[2.5, 6]])
        riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
            type of riemannian used
        rho: float
            Normalization parameter for the covariance matrix of the riemannian
        filter_type: str {"butter", "fir"}
            Type of the filter
        filter_order: int
            Order of the filter
        random_state: int or None
            random seed used in the SVM
        """
        # setup classifier
        if svm_kernel == 'linear':
            self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
        else:
            self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
                                  cache_size=10000, random_state=random_state)
        # setup Filterbank
        if bands is None:
            bandwidths = np.array([2, 4, 8, 16, 32])
        else:
            bandwidths = np.array(bands)
        filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
        # setup Time Windows (seconds -> sample indices)
        if time_windows is None:
            time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
            # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
        else:
            time_windows = (np.array(time_windows) * fs).astype(int)
        # setup riemannian
        self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
                                               rho=rho, vectorized=True)
        # store dimensionality (per-channel feature counts are set in fit)
        self.no_bands = filter_bank.shape[0]
        self.no_time_windows = time_windows.shape[0]
        self.no_riem = None
        self.no_features = None
    def fit(self, samples, labels):
        """ Training

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels
        """
        # extract the number of features
        assert len(samples.shape) == 3
        no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of CSP feature per band and timewindow
        self.no_features = self.no_riem * self.no_bands * self.no_time_windows
        # fit and extract training features from the riemannian
        features = self.riemannian.fit(samples)
        self.classifier.fit(features, labels)
    def score(self, samples, labels):
        """ Measure the performance, returns success rate

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels

        Returns
        -------
        float: score of the model
        """
        features = self.riemannian.features(samples)
        return self.classifier.score(features, labels)
    def predict(self, samples):
        """ Predict some data

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples

        Returns
        -------
        np.array, size=[N]: prediction
        """
        features = self.riemannian.features(samples)
        return self.classifier.predict(features)
class QuantizedRiemannianModel():
    """ QuantizedRiemannian Model

    Same pipeline as RiemannianModel (filterbank -> Riemannian features ->
    linear SVM), but with quantized features and classifier weights.
    """
    def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
                 random_state=None, num_bits=8, bitshift_scale=True):
        """ Constructor

        Parameters
        ----------
        svm_c: float
            regularization parameter for the classifier
        fs: int
            sampling rate of the data
        bands: list of int
            bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
        riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
            type of riemannian used
        rho: float
            Normalization parameter for the covariance matrix of the riemannian
        filter_order: int
            Order of the filter
        random_state: int or None
            random seed used in the SVM
        num_bits: int
            Number of bits used for quantization
        bitshift_scale: bool
            if True, make sure that all scale factors between one part and the next is a bitshift
        """
        self.num_bits = num_bits
        self.bitshift_scale = bitshift_scale
        # setup classifier (linear only: quantization targets linear weights)
        self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
        # setup Filterbank
        if bands is None:
            bandwidths = np.array([2, 4, 8, 16, 32])
        else:
            bandwidths = np.array(bands)
        filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
        # setup Time Windows (fixed 2.5 s - 6 s window, converted to samples)
        time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
        # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
        # setup riemannian
        self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
                                                        rho=rho, vectorized=True, num_bits=num_bits,
                                                        bitshift_scale=bitshift_scale)
        # prepare quantized weights and biases (scales are updated during fit)
        self.scale_weight = 0
        self.scale_bias = 0
        # store dimensionality
        self.no_bands = filter_bank.shape[0]
        self.no_time_windows = time_windows.shape[0]
        self.no_riem = None
        self.no_features = None
    def fit(self, samples, labels):
        """ Training

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels
        """
        # extract the number of features
        assert len(samples.shape) == 3
        no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of CSP feature per band and timewindow
        self.no_features = self.no_riem * self.no_bands * self.no_time_windows
        # prepare scale factors
        self.riemannian.prepare_quantization(samples)
        # fit and extract training features from the riemannian
        features = self.riemannian.fit(samples)
        self.classifier.fit(features, labels)
        # quantize the classifier weights in place
        self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
        weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
        self.classifier.coef_ = weights
        # do not quantize the bias, this one will be added in 32 bit, and quantization does not
        # matter here...
        # self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
        # bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
        #                 do_round=True)
        # self.classifier.intercept_ = bias
    def score(self, samples, labels):
        """ Measure the performance, returns success rate

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels

        Returns
        -------
        float: score of the model
        """
        features = self.riemannian.features(samples)
        return self.classifier.score(features, labels)
    def predict(self, samples):
        """ Predict some data

        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples

        Returns
        -------
        np.array, size=[N]: prediction
        """
        features = self.riemannian.features(samples)
        return self.classifier.predict(features)
    def predict_with_intermediate(self, sample, verbose=True):
        """ Predict a single trial, keeping every intermediate result

        Parameters
        ----------
        sample: np.array, size=(C, T)
            single trial to predict
        verbose: bool
            if True, print a progress message

        Returns
        -------
        ordered dictionary including every intermediate result and the output
        """
        if verbose:
            print("Predict sample with intermediate matrices")
        assert len(sample.shape) == 2
        result = self.riemannian.onetrial_feature_with_intermediate(sample)
        # the last entry of the ordered dict is the final feature vector
        features = next(reversed(result.values()))
        features = features.reshape(1, -1)
        result["svm_result"] = self.classifier.decision_function(features)
        result["prediction"] = self.classifier.predict(features)
        return result
    def get_data_dict(self):
        """ Returns a nested dictionary containing all necessary data """
        return {"num_bits": self.num_bits,
                "bitshift_scale": self.bitshift_scale,
                "SVM": {"weights": self.classifier.coef_,
                        "weight_scale": self.scale_weight,
                        "bias": self.classifier.intercept_},
                "riemannian": self.riemannian.get_data_dict()}
|
fit
|
Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
|
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
Args:
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
time windows used, in seconds (default: [[2,5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
# MASKED: fit function (lines 95-115)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
class QuantizedRiemannianModel():
""" QuantizedRiemannian Model """
def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
random_state=None, num_bits=8, bitshift_scale=True):
""" Constructor
Parameters
----------
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
num_bits: int
Number of bits used for quantization
bitshift_scale: bool
if True, make sure that all scale factors between one part and the next is a bitshift
"""
self.num_bits = num_bits
self.bitshift_scale = bitshift_scale
# setup classifier
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
# setup Time Windows
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
# setup riemannian
self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True, num_bits=num_bits,
bitshift_scale=bitshift_scale)
# prepare quantized weights and biases
self.scale_weight = 0
self.scale_bias = 0
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# prepare scale factors
self.riemannian.prepare_quantization(samples)
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
# quantize the classifier
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
self.classifier.coef_ = weights
# do not quantize the bias, this one will be added in 32 bit, and quantization does not
# matter here...
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
# bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
# do_round=True)
# self.classifier.intercept_ = bias
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
def predict_with_intermediate(self, sample, verbose=True):
""" Predict some data
Parameters
----------
samples: np.array, size=(C, T)
training sample
Returns
-------
ordered dictionary including every intermediate result and the output
"""
if verbose:
print("Predict sample with intermediate matrices")
assert len(sample.shape) == 2
result = self.riemannian.onetrial_feature_with_intermediate(sample)
features = next(reversed(result.values()))
features = features.reshape(1, -1)
result["svm_result"] = self.classifier.decision_function(features)
result["prediction"] = self.classifier.predict(features)
return result
def get_data_dict(self):
""" Returns a nested dictionary containing all necessary data """
return {"num_bits": self.num_bits,
"bitshift_scale": self.bitshift_scale,
"SVM": {"weights": self.classifier.coef_,
"weight_scale": self.scale_weight,
"bias": self.classifier.intercept_},
"riemannian": self.riemannian.get_data_dict()}
|
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
| 95 | 115 |
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
Args:
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
time windows used, in seconds (default: [[2,5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
class QuantizedRiemannianModel():
""" QuantizedRiemannian Model """
def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
random_state=None, num_bits=8, bitshift_scale=True):
""" Constructor
Parameters
----------
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
num_bits: int
Number of bits used for quantization
bitshift_scale: bool
if True, make sure that all scale factors between one part and the next is a bitshift
"""
self.num_bits = num_bits
self.bitshift_scale = bitshift_scale
# setup classifier
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
# setup Time Windows
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
# setup riemannian
self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True, num_bits=num_bits,
bitshift_scale=bitshift_scale)
# prepare quantized weights and biases
self.scale_weight = 0
self.scale_bias = 0
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# prepare scale factors
self.riemannian.prepare_quantization(samples)
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
# quantize the classifier
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
self.classifier.coef_ = weights
# do not quantize the bias, this one will be added in 32 bit, and quantization does not
# matter here...
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
# bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
# do_round=True)
# self.classifier.intercept_ = bias
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
def predict_with_intermediate(self, sample, verbose=True):
""" Predict some data
Parameters
----------
samples: np.array, size=(C, T)
training sample
Returns
-------
ordered dictionary including every intermediate result and the output
"""
if verbose:
print("Predict sample with intermediate matrices")
assert len(sample.shape) == 2
result = self.riemannian.onetrial_feature_with_intermediate(sample)
features = next(reversed(result.values()))
features = features.reshape(1, -1)
result["svm_result"] = self.classifier.decision_function(features)
result["prediction"] = self.classifier.predict(features)
return result
def get_data_dict(self):
""" Returns a nested dictionary containing all necessary data """
return {"num_bits": self.num_bits,
"bitshift_scale": self.bitshift_scale,
"SVM": {"weights": self.classifier.coef_,
"weight_scale": self.scale_weight,
"bias": self.classifier.intercept_},
"riemannian": self.riemannian.get_data_dict()}
|
fit
|
Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
|
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
Args:
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
time windows used, in seconds (default: [[2,5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
class QuantizedRiemannianModel():
""" QuantizedRiemannian Model """
def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
random_state=None, num_bits=8, bitshift_scale=True):
""" Constructor
Parameters
----------
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
num_bits: int
Number of bits used for quantization
bitshift_scale: bool
if True, make sure that all scale factors between one part and the next is a bitshift
"""
self.num_bits = num_bits
self.bitshift_scale = bitshift_scale
# setup classifier
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
# setup Time Windows
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
# setup riemannian
self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True, num_bits=num_bits,
bitshift_scale=bitshift_scale)
# prepare quantized weights and biases
self.scale_weight = 0
self.scale_bias = 0
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
# MASKED: fit function (lines 224-252)
# do not quantize the bias, this one will be added in 32 bit, and quantization does not
# matter here...
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
# bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
# do_round=True)
# self.classifier.intercept_ = bias
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
def predict_with_intermediate(self, sample, verbose=True):
""" Predict some data
Parameters
----------
samples: np.array, size=(C, T)
training sample
Returns
-------
ordered dictionary including every intermediate result and the output
"""
if verbose:
print("Predict sample with intermediate matrices")
assert len(sample.shape) == 2
result = self.riemannian.onetrial_feature_with_intermediate(sample)
features = next(reversed(result.values()))
features = features.reshape(1, -1)
result["svm_result"] = self.classifier.decision_function(features)
result["prediction"] = self.classifier.predict(features)
return result
def get_data_dict(self):
""" Returns a nested dictionary containing all necessary data """
return {"num_bits": self.num_bits,
"bitshift_scale": self.bitshift_scale,
"SVM": {"weights": self.classifier.coef_,
"weight_scale": self.scale_weight,
"bias": self.classifier.intercept_},
"riemannian": self.riemannian.get_data_dict()}
|
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# prepare scale factors
self.riemannian.prepare_quantization(samples)
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
# quantize the classifier
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
self.classifier.coef_ = weights
| 224 | 252 |
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
Args:
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
time windows used, in seconds (default: [[2,5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
# extract the number of eatures
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
class QuantizedRiemannianModel():
    """ QuantizedRiemannian Model

    Pipeline: filterbank -> quantized Riemannian multiscale features ->
    linear SVM.  All feature scale factors are quantized to ``num_bits``
    bits so the trained model can be deployed on fixed-point hardware.
    """
    def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
                 random_state=None, num_bits=8, bitshift_scale=True):
        """ Constructor
        Parameters
        ----------
        svm_c: float
            regularization parameter for the classifier
        fs: int
            sampling rate of the data
        bands: list of int
            bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
        riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
            type of riemannian used
        rho: float
            Normalization parameter for the covariance matrix of the riemannian
        filter_order: int
            Order of the filter
        random_state: int or None
            random seed used in the SVM
        num_bits: int
            Number of bits used for quantization
        bitshift_scale: bool
            if True, make sure that all scale factors between one part and the next is a bitshift
        """
        self.num_bits = num_bits
        self.bitshift_scale = bitshift_scale
        # setup classifier
        self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
        # setup Filterbank
        if bands is None:
            bandwidths = np.array([2, 4, 8, 16, 32])
        else:
            bandwidths = np.array(bands)
        filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
        # setup Time Windows (converted from seconds to sample indices)
        time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
        # time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
        # setup riemannian
        self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
                                                        rho=rho, vectorized=True, num_bits=num_bits,
                                                        bitshift_scale=bitshift_scale)
        # prepare quantized weights and biases (scales are learned in fit())
        self.scale_weight = 0
        self.scale_bias = 0
        # store dimensionality
        self.no_bands = filter_bank.shape[0]
        self.no_time_windows = time_windows.shape[0]
        # set in fit() once the number of channels is known
        self.no_riem = None
        self.no_features = None

    def fit(self, samples, labels):
        """ Training
        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels
        """
        # extract the number of features
        assert len(samples.shape) == 3
        no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
        self.no_features = self.no_riem * self.no_bands * self.no_time_windows
        # prepare scale factors
        self.riemannian.prepare_quantization(samples)
        # fit and extract training features from the riemannian
        features = self.riemannian.fit(samples)
        self.classifier.fit(features, labels)
        # quantize the classifier
        self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
        weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
        self.classifier.coef_ = weights
        # do not quantize the bias, this one will be added in 32 bit, and quantization does not
        # matter here...
        # self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
        # bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
        #                 do_round=True)
        # self.classifier.intercept_ = bias

    def score(self, samples, labels):
        """ Measure the performance, returns success rate
        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        labels: np.array, size=(N)
            training labels
        Returns
        -------
        float: score of the model
        """
        features = self.riemannian.features(samples)
        return self.classifier.score(features, labels)

    def predict(self, samples):
        """ Predict some data
        Parameters
        ----------
        samples: np.array, size=(N, C, T)
            training samples
        Returns
        -------
        np.array, size=[N]: prediction
        """
        features = self.riemannian.features(samples)
        return self.classifier.predict(features)

    def predict_with_intermediate(self, sample, verbose=True):
        """ Predict a single trial, keeping every intermediate result
        Parameters
        ----------
        sample: np.array, size=(C, T)
            a single trial (no batch dimension)
        verbose: bool
            if True, print a progress message
        Returns
        -------
        ordered dictionary including every intermediate result and the output
        """
        if verbose:
            print("Predict sample with intermediate matrices")
        assert len(sample.shape) == 2
        result = self.riemannian.onetrial_feature_with_intermediate(sample)
        # the last entry of the ordered dict is the final feature vector
        features = next(reversed(result.values()))
        features = features.reshape(1, -1)
        result["svm_result"] = self.classifier.decision_function(features)
        result["prediction"] = self.classifier.predict(features)
        return result

    def get_data_dict(self):
        """ Returns a nested dictionary containing all necessary data """
        return {"num_bits": self.num_bits,
                "bitshift_scale": self.bitshift_scale,
                "SVM": {"weights": self.classifier.coef_,
                        "weight_scale": self.scale_weight,
                        "bias": self.classifier.intercept_},
                "riemannian": self.riemannian.get_data_dict()}
|
kind_from_path
|
Determine the file kind based on its name.
When called with base=True, it will return the base file type instead
of the explicit one. That is expected to return 'yaml' for any yaml files.
|
"""Utility functions related to file operations."""
import copy
import logging
import os
import subprocess
import sys
from argparse import Namespace
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union
# import wcmatch
import wcmatch.pathlib
from wcmatch.wcmatch import RECURSIVE, WcMatch
from ansiblelint.config import BASE_KINDS, options
from ansiblelint.constants import FileType
if TYPE_CHECKING:
# https://github.com/PyCQA/pylint/issues/3979
BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object
else:
BasePathLike = os.PathLike
_logger = logging.getLogger(__package__)
def normpath(path: Union[str, BasePathLike]) -> str:
    """Normalize a path in order to provide a more consistent output.

    Currently it generates a relative path but in the future we may want to
    make this user configurable.
    """
    # str() lets us accept any path-like object, not only strings
    as_text = str(path)
    absolute = os.path.abspath(as_text)
    relative = os.path.relpath(as_text)
    # prefer the absolute form when the relative one would end up at root level
    return absolute if absolute in relative else relative
@contextmanager
def cwd(path: Union[str, BasePathLike]) -> Iterator[None]:
    """Context manager for temporary changing current working directory."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # always restore, even when the body raised
        os.chdir(previous)
def expand_path_vars(path: str) -> str:
    """Expand the environment or ~ variables in a path string."""
    # It may be possible for function to be called with a Path object;
    # str() first, then strip, then user/env expansion (same order as before)
    return os.path.expandvars(os.path.expanduser(str(path).strip()))
def expand_paths_vars(paths: List[str]) -> List[str]:
    """Expand the environment or ~ variables in a list."""
    return [expand_path_vars(entry) for entry in paths]
# MASKED: kind_from_path function (lines 72-105)
class Lintable:
    """Defines a file/folder that can be linted.

    Providing file content when creating the object allow creation of in-memory
    instances that do not need files to be present on disk.
    """

    def __init__(
        self,
        name: Union[str, Path],
        content: Optional[str] = None,
        kind: Optional[FileType] = None,
    ):
        """Create a Lintable instance.

        Parameters
        ----------
        name:
            Path (or string path) of the file/folder, or '-'/'/dev/stdin'
            to read the content from standard input.
        content:
            Optional in-memory file content.
        kind:
            Optional explicit file kind; autodetected from the path otherwise.
        """
        # Filename is effective file on disk, for stdin is a namedtempfile
        self.filename: str = str(name)
        self.dir: str = ""
        self.kind: Optional[FileType] = None

        if isinstance(name, str):
            self.name = normpath(name)
            self.path = Path(self.name)
        else:
            self.name = str(name)
            self.path = name
        self._content = content

        # if the lintable is part of a role, we save role folder name
        self.role = ""
        parts = self.path.parent.parts
        if 'roles' in parts:
            # walk up to the directory directly below the "roles" folder
            role = self.path
            while role.parent.name != "roles" and role.name:
                role = role.parent
            # BUGFIX: ``role.exists`` (no parentheses) is a bound method and
            # is therefore always truthy, so the existence check was never
            # actually performed.  Call it so only real folders are accepted.
            if role.exists():
                self.role = role.name

        if str(self.path) in ['/dev/stdin', '-']:
            # pylint: disable=consider-using-with
            self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml")
            self.filename = self.file.name
            self._content = sys.stdin.read()
            self.file.write(self._content)
            self.file.flush()
            self.path = Path(self.file.name)
            self.name = 'stdin'
            self.kind = 'playbook'
            self.dir = '/'
        else:
            self.kind = kind or kind_from_path(self.path)
        # We store absolute directory in dir
        if not self.dir:
            if self.kind == "role":
                self.dir = str(self.path.resolve())
            else:
                self.dir = str(self.path.parent.resolve())
        # determine base file kind (yaml, xml, ini, ...)
        self.base_kind = kind_from_path(self.path, base=True)

    def __getitem__(self, key: Any) -> Any:
        """Provide compatibility subscriptable support."""
        if key == 'path':
            return str(self.path)
        if key == 'type':
            return str(self.kind)
        raise NotImplementedError()

    def get(self, key: Any, default: Any = None) -> Any:
        """Provide compatibility subscriptable support."""
        try:
            return self.__getitem__(key)
        except NotImplementedError:
            return default

    @property
    def content(self) -> str:
        """Return file content, from the internal cache or from disk."""
        if self._content is None:
            with open(self.path, mode='r', encoding='utf-8') as f:
                self._content = f.read()
        return self._content

    def __hash__(self) -> int:
        """Return a hash value of the lintables."""
        return hash((self.name, self.kind))

    def __eq__(self, other: object) -> bool:
        """Identify whether the other object represents the same rule match."""
        if isinstance(other, Lintable):
            return bool(self.name == other.name and self.kind == other.kind)
        return False

    def __repr__(self) -> str:
        """Return user friendly representation of a lintable."""
        return f"{self.name} ({self.kind})"
def discover_lintables(options: Namespace) -> Dict[str, Any]:
    """Find all files that we know how to lint.

    Prefers ``git ls-files`` (which honours .gitignore) and falls back to a
    recursive filesystem walk when the tree is not a git repository or git
    is not installed.
    """
    # git is preferred as it also considers .gitignore
    git_command = ['git', 'ls-files', '-z']
    out = None
    try:
        # -z output is NUL-separated; the trailing NUL produces an empty
        # last element which [:-1] drops
        out = subprocess.check_output(
            git_command, stderr=subprocess.STDOUT, universal_newlines=True
        ).split("\x00")[:-1]
        _logger.info("Discovered files to lint using: %s", ' '.join(git_command))
    except subprocess.CalledProcessError as exc:
        # exit code 128 with this message just means "not a repo": stay quiet
        if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output):
            _logger.warning(
                "Failed to discover lintable files using git: %s",
                exc.output.rstrip('\n'),
            )
    except FileNotFoundError as exc:
        # the git executable is not installed
        if options.verbosity:
            _logger.warning("Failed to locate command: %s", exc)
    if out is None:
        # fallback: recursive walk honouring the configured exclude patterns
        exclude_pattern = "|".join(options.exclude_paths)
        _logger.info("Looking up for files, excluding %s ...", exclude_pattern)
        out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match()
    # sorted + fromkeys yields a stable, duplicate-free ordered mapping
    return OrderedDict.fromkeys(sorted(out))
def guess_project_dir() -> str:
    """Return detected project dir or user home directory."""
    fallback = str(Path.home())
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
            check=False,
        )
    except FileNotFoundError:
        # git is not installed at all
        return fallback
    if proc.returncode != 0:
        # not inside a git repository
        return fallback
    # the first stdout line is the repository top-level directory
    return proc.stdout.splitlines()[0]
def expand_dirs_in_lintables(lintables: Set[Lintable]) -> None:
    """Return all recognized lintables within given directory."""
    # file discovery may shell out to git, so only do it when at least one
    # of the lintables is actually a directory
    if not any(item.path.is_dir() for item in lintables):
        return
    all_files = discover_lintables(options)
    for item in copy.copy(lintables):
        if not item.path.is_dir():
            continue
        prefix = str(item.path)
        for filename in all_files:
            if filename.startswith(prefix):
                lintables.add(Lintable(filename))
|
def kind_from_path(path: Path, base: bool = False) -> FileType:
    """Determine the file kind based on its name.

    When called with base=True, it will return the base file type instead
    of the explicit one. That is expected to return 'yaml' for any yaml files.
    """
    # pathlib.Path.match patterns are very limited, they do not support *a*.yml
    # glob.glob supports **/foo.yml but not multiple extensions
    pathex = wcmatch.pathlib.PurePath(path.absolute().resolve())
    glob_flags = (
        wcmatch.pathlib.GLOBSTAR
        | wcmatch.pathlib.BRACE
        | wcmatch.pathlib.DOTGLOB
    )
    for entry in (BASE_KINDS if base else options.kinds):
        for kind_name, pattern in entry.items():
            if pathex.globmatch(pattern, flags=glob_flags):
                return str(kind_name)  # type: ignore
    if base:
        # Unknown base file type is default
        return ""
    if path.is_dir():
        return "role"
    if str(path) == '/dev/stdin':
        return "playbook"
    # Unknown file types report a empty string (evaluated as False)
    return ""
| 72 | 105 |
"""Utility functions related to file operations."""
import copy
import logging
import os
import subprocess
import sys
from argparse import Namespace
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union
# import wcmatch
import wcmatch.pathlib
from wcmatch.wcmatch import RECURSIVE, WcMatch
from ansiblelint.config import BASE_KINDS, options
from ansiblelint.constants import FileType
if TYPE_CHECKING:
# https://github.com/PyCQA/pylint/issues/3979
BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object
else:
BasePathLike = os.PathLike
_logger = logging.getLogger(__package__)
def normpath(path: Union[str, BasePathLike]) -> str:
    """Normalize a path in order to provide a more consistent output.

    Currently it generates a relative path but in the future we may want to
    make this user configurable.
    """
    # str() lets us accept any path-like object, not only strings
    as_text = str(path)
    absolute = os.path.abspath(as_text)
    relative = os.path.relpath(as_text)
    # prefer the absolute form when the relative one would end up at root level
    return absolute if absolute in relative else relative
@contextmanager
def cwd(path: Union[str, BasePathLike]) -> Iterator[None]:
    """Context manager for temporary changing current working directory."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # always restore, even when the body raised
        os.chdir(previous)
def expand_path_vars(path: str) -> str:
    """Expand the environment or ~ variables in a path string."""
    # It may be possible for function to be called with a Path object;
    # str() first, then strip, then user/env expansion (same order as before)
    return os.path.expandvars(os.path.expanduser(str(path).strip()))
def expand_paths_vars(paths: List[str]) -> List[str]:
    """Expand the environment or ~ variables in a list."""
    return [expand_path_vars(entry) for entry in paths]
def kind_from_path(path: Path, base: bool = False) -> FileType:
    """Determine the file kind based on its name.

    When called with base=True, it will return the base file type instead
    of the explicit one. That is expected to return 'yaml' for any yaml files.
    """
    # pathlib.Path.match patterns are very limited, they do not support *a*.yml
    # glob.glob supports **/foo.yml but not multiple extensions
    pathex = wcmatch.pathlib.PurePath(path.absolute().resolve())
    # explicit kinds come from the configuration; base kinds are generic
    kinds = options.kinds if not base else BASE_KINDS
    for entry in kinds:
        # each entry maps a single kind name to one glob pattern
        for k, v in entry.items():
            if pathex.globmatch(
                v,
                flags=(
                    wcmatch.pathlib.GLOBSTAR
                    | wcmatch.pathlib.BRACE
                    | wcmatch.pathlib.DOTGLOB
                ),
            ):
                return str(k)  # type: ignore
    if base:
        # Unknown base file type is default
        return ""
    if path.is_dir():
        return "role"
    if str(path) == '/dev/stdin':
        return "playbook"
    # Unknown file types report a empty string (evaluated as False)
    return ""
class Lintable:
    """Defines a file/folder that can be linted.

    Providing file content when creating the object allow creation of in-memory
    instances that do not need files to be present on disk.
    """

    def __init__(
        self,
        name: Union[str, Path],
        content: Optional[str] = None,
        kind: Optional[FileType] = None,
    ):
        """Create a Lintable instance.

        Parameters
        ----------
        name:
            Path (or string path) of the file/folder, or '-'/'/dev/stdin'
            to read the content from standard input.
        content:
            Optional in-memory file content.
        kind:
            Optional explicit file kind; autodetected from the path otherwise.
        """
        # Filename is effective file on disk, for stdin is a namedtempfile
        self.filename: str = str(name)
        self.dir: str = ""
        self.kind: Optional[FileType] = None

        if isinstance(name, str):
            self.name = normpath(name)
            self.path = Path(self.name)
        else:
            self.name = str(name)
            self.path = name
        self._content = content

        # if the lintable is part of a role, we save role folder name
        self.role = ""
        parts = self.path.parent.parts
        if 'roles' in parts:
            # walk up to the directory directly below the "roles" folder
            role = self.path
            while role.parent.name != "roles" and role.name:
                role = role.parent
            # BUGFIX: ``role.exists`` (no parentheses) is a bound method and
            # is therefore always truthy, so the existence check was never
            # actually performed.  Call it so only real folders are accepted.
            if role.exists():
                self.role = role.name

        if str(self.path) in ['/dev/stdin', '-']:
            # pylint: disable=consider-using-with
            self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml")
            self.filename = self.file.name
            self._content = sys.stdin.read()
            self.file.write(self._content)
            self.file.flush()
            self.path = Path(self.file.name)
            self.name = 'stdin'
            self.kind = 'playbook'
            self.dir = '/'
        else:
            self.kind = kind or kind_from_path(self.path)
        # We store absolute directory in dir
        if not self.dir:
            if self.kind == "role":
                self.dir = str(self.path.resolve())
            else:
                self.dir = str(self.path.parent.resolve())
        # determine base file kind (yaml, xml, ini, ...)
        self.base_kind = kind_from_path(self.path, base=True)

    def __getitem__(self, key: Any) -> Any:
        """Provide compatibility subscriptable support."""
        if key == 'path':
            return str(self.path)
        if key == 'type':
            return str(self.kind)
        raise NotImplementedError()

    def get(self, key: Any, default: Any = None) -> Any:
        """Provide compatibility subscriptable support."""
        try:
            return self.__getitem__(key)
        except NotImplementedError:
            return default

    @property
    def content(self) -> str:
        """Return file content, from the internal cache or from disk."""
        if self._content is None:
            with open(self.path, mode='r', encoding='utf-8') as f:
                self._content = f.read()
        return self._content

    def __hash__(self) -> int:
        """Return a hash value of the lintables."""
        return hash((self.name, self.kind))

    def __eq__(self, other: object) -> bool:
        """Identify whether the other object represents the same rule match."""
        if isinstance(other, Lintable):
            return bool(self.name == other.name and self.kind == other.kind)
        return False

    def __repr__(self) -> str:
        """Return user friendly representation of a lintable."""
        return f"{self.name} ({self.kind})"
def discover_lintables(options: Namespace) -> Dict[str, Any]:
    """Find all files that we know how to lint.

    Prefers ``git ls-files`` (which honours .gitignore) and falls back to a
    recursive filesystem walk when the tree is not a git repository or git
    is not installed.
    """
    # git is preferred as it also considers .gitignore
    git_command = ['git', 'ls-files', '-z']
    out = None
    try:
        # -z output is NUL-separated; the trailing NUL produces an empty
        # last element which [:-1] drops
        out = subprocess.check_output(
            git_command, stderr=subprocess.STDOUT, universal_newlines=True
        ).split("\x00")[:-1]
        _logger.info("Discovered files to lint using: %s", ' '.join(git_command))
    except subprocess.CalledProcessError as exc:
        # exit code 128 with this message just means "not a repo": stay quiet
        if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output):
            _logger.warning(
                "Failed to discover lintable files using git: %s",
                exc.output.rstrip('\n'),
            )
    except FileNotFoundError as exc:
        # the git executable is not installed
        if options.verbosity:
            _logger.warning("Failed to locate command: %s", exc)
    if out is None:
        # fallback: recursive walk honouring the configured exclude patterns
        exclude_pattern = "|".join(options.exclude_paths)
        _logger.info("Looking up for files, excluding %s ...", exclude_pattern)
        out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match()
    # sorted + fromkeys yields a stable, duplicate-free ordered mapping
    return OrderedDict.fromkeys(sorted(out))
def guess_project_dir() -> str:
    """Return detected project dir or user home directory."""
    fallback = str(Path.home())
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            universal_newlines=True,
            check=False,
        )
    except FileNotFoundError:
        # git is not installed at all
        return fallback
    if proc.returncode != 0:
        # not inside a git repository
        return fallback
    # the first stdout line is the repository top-level directory
    return proc.stdout.splitlines()[0]
def expand_dirs_in_lintables(lintables: Set[Lintable]) -> None:
    """Return all recognized lintables within given directory."""
    # file discovery may shell out to git, so only do it when at least one
    # of the lintables is actually a directory
    if not any(item.path.is_dir() for item in lintables):
        return
    all_files = discover_lintables(options)
    for item in copy.copy(lintables):
        if not item.path.is_dir():
            continue
        prefix = str(item.path)
        for filename in all_files:
            if filename.startswith(prefix):
                lintables.add(Lintable(filename))
|
advanced_open
|
Open function interface for files with different extensions.
Parameters
----------
filepath: str
File path with extension.
args: list
Non-key arguments
kwargs: dict
Key arguments
Returns
-------
|
# -*- coding: utf-8 -*-
import gzip
import bz2
import numpy as np
# MASKED: advanced_open function (lines 8-30)
def load_kg_file(filepath, separator="\t", as_stream=False):
    """ Import knowledge graph from file

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator
    as_stream: bool
        If True, return a lazy generator of triplets instead of a
        materialized numpy array.

    Returns
    -------
    np.array or generator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    # BUGFIX: ``as_stream`` used to be accepted but silently ignored;
    # honour it by delegating to the streaming loader.
    if as_stream:
        return load_kg_file_as_stream(filepath, separator=separator)
    with advanced_open(filepath) as file_content:
        kg_triples = [line.strip().split(separator) for line in file_content]
    return np.array(kg_triples)
def load_kg_file_as_stream(filepath, separator="\t"):
    """ Import knowledge graph from file as a stream

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator

    Returns
    -------
    generator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    with advanced_open(filepath) as file_content:
        # lazily emit one [head, relation, tail] list per line
        yield from (row.strip().split(separator) for row in file_content)
|
def advanced_open(filepath, *args, **kwargs):
    """ Open function interface for files with different extensions.

    Transparently dispatches to gzip.open or bz2.open based on the file
    extension, defaulting to text mode ("rt").

    Parameters
    ----------
    filepath: str
        File path with extension.
    args: list
        Non-key arguments forwarded to the underlying open function.
    kwargs: dict
        Key arguments forwarded to the underlying open function.

    Returns
    -------
    file object
        An opened file handle (text mode unless overridden via ``mode``).
    """
    # BUGFIX: the mode used to be passed as a hard-coded keyword, so any
    # caller supplying its own ``mode`` raised "got multiple values for
    # keyword argument 'mode'".  setdefault keeps "rt" as the default while
    # allowing an explicit override.
    kwargs.setdefault("mode", "rt")
    if filepath.endswith('.gz'):
        open_fn = gzip.open
    elif filepath.endswith('.bz2'):
        open_fn = bz2.open
    else:
        open_fn = open
    return open_fn(filepath, *args, **kwargs)
| 8 | 30 |
# -*- coding: utf-8 -*-
import gzip
import bz2
import numpy as np
def advanced_open(filepath, *args, **kwargs):
    """ Open function interface for files with different extensions.

    Transparently dispatches to gzip.open or bz2.open based on the file
    extension, defaulting to text mode ("rt").

    Parameters
    ----------
    filepath: str
        File path with extension.
    args: list
        Non-key arguments forwarded to the underlying open function.
    kwargs: dict
        Key arguments forwarded to the underlying open function.

    Returns
    -------
    file object
        An opened file handle (text mode unless overridden via ``mode``).
    """
    # BUGFIX: the mode used to be passed as a hard-coded keyword, so any
    # caller supplying its own ``mode`` raised "got multiple values for
    # keyword argument 'mode'".  setdefault keeps "rt" as the default while
    # allowing an explicit override.
    kwargs.setdefault("mode", "rt")
    if filepath.endswith('.gz'):
        open_fn = gzip.open
    elif filepath.endswith('.bz2'):
        open_fn = bz2.open
    else:
        open_fn = open
    return open_fn(filepath, *args, **kwargs)
def load_kg_file(filepath, separator="\t", as_stream=False):
    """ Import knowledge graph from file

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator
    as_stream: bool
        If True, return a lazy generator of triplets instead of a
        materialized numpy array.

    Returns
    -------
    np.array or generator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    # BUGFIX: ``as_stream`` used to be accepted but silently ignored;
    # honour it by delegating to the streaming loader.
    if as_stream:
        return load_kg_file_as_stream(filepath, separator=separator)
    with advanced_open(filepath) as file_content:
        kg_triples = [line.strip().split(separator) for line in file_content]
    return np.array(kg_triples)
def load_kg_file_as_stream(filepath, separator="\t"):
    """ Import knowledge graph from file as a stream

    Parameters
    ----------
    filepath: str
        File path
    separator: str
        File column separator

    Returns
    -------
    generator
        The knowledge graph triplets obtained from the files with size [?, 3]
    """
    with advanced_open(filepath) as file_content:
        # lazily emit one [head, relation, tail] list per line
        yield from (row.strip().split(separator) for row in file_content)
|
prep_and_send
|
Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list.
It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send
the list of data-tuples the next time there is a water-flow event.
Once the connection is successful data is emptied in commit_data().
|
import RPi.GPIO as GPIO
import time,sys, datetime, json, requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
'''
Configure raspberry
'''
GPIO.setmode(GPIO.BCM)
inpt = 13  # BCM pin number the flow sensor is wired to
GPIO.setup(inpt,GPIO.IN)
'''
Configure some global variables
'''
current_input = GPIO.input(inpt) # This is used to compare to the new_input later.
total_rotations = 0 # This is a counter. It gets reset after the number of seconds in rotation_downtime.
cup_movements = 200 # This is how many rotations occur as a cup of liquid passes through.
rotation_downtime = 5 # Sets the cut-off time for establishing a water-flow event.
last_movement_time = time.time() + rotation_downtime # This is used to determine if a new water-flow event should be created.
record_data = False # A flag used to trigger database insert.
data = [] # Queue of event tuples not yet delivered to the server.
print('Control C to exit')
def commit_data(data):
    '''
    Send the oldest queued water-flow event to the Node-RED endpoint as JSON.

    On success the queue is emptied; on a connection problem the queue is
    returned unchanged so prep_and_send() can retry it on the next event.
    '''
    url = 'http://localhost:1880/sensor'
    headers = {
        'Accepts': 'application/json'
    }
    print(f"1: {data[0]}")
    # indices 1..4 skip the timestamp stored at data[0][0]
    send_jsn = json.dumps({"Movements": data[0][1], "Cups": data[0][2], "Gallons": data[0][3], "Liters": data[0][4]})
    try:
        response = requests.post(url, data=send_jsn, headers=headers)
        print(response.text)
    except (ConnectionError, Timeout, TooManyRedirects) as e:
        print(e)
        # BUGFIX: the queue used to be cleared even when the POST failed,
        # losing the data that prep_and_send() documents as being saved for
        # a retry on the next water-flow event.  Keep it instead.
        return data
    return []
# MASKED: prep_and_send function (lines 52-81)
while True:
    '''
    This is what actually runs the whole time.
    It first checks to see if new_input is different from current_input. This would be the case if there was a rotation.
    Once it detects that the input is different it knows water is flowing.
    It starts tracking the total_rotations and when the last rotation occured.
    After each rotation it refreshes the value of the last rotation time.
    It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped.
    Once the water stops it passes the total_rotations to prep_and_send().
    It also passes 'data' which is any previous water-flow events that were not successfully sent at the time they were recorded.
    '''
    new_input = GPIO.input(inpt)
    if new_input != current_input:
        # the sensor toggled: one rotation happened
        total_rotations += 1
        if time.time() <= last_movement_time: #if it hasn't been more than 10 seconds
            record_data = True
            current_input = new_input
            last_movement_time = time.time() + rotation_downtime
        else: #flow starts
            last_movement_time = time.time() + rotation_downtime
    elif record_data == True and time.time() > last_movement_time: #if it's been x seconds since last change
        # flow stopped: persist the event and reset the counters
        data = prep_and_send(data,total_rotations)
        record_data = False
        total_rotations = 0
        last_movement_time = time.time() + rotation_downtime
    current_input = new_input
    try:
        None
        #print('New input: ',new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)
    except KeyboardInterrupt:
        # NOTE(review): this handler can never fire -- the try body is just
        # ``None``, so a Ctrl-C raised elsewhere in the loop is not caught
        # here.  Consider wrapping the whole loop body instead; confirm intent.
        print('\nCTRL C - Exiting nicely')
        GPIO.cleanup()
        sys.exit()
|
def prep_and_send(data,total_rotations):
    '''
    Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list.
    It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send
    the list of data-tuples the next time there is a water-flow event.
    Once the connection is successful data is emptied in commit_data().
    '''
    # derive the volume measurements from the raw rotation count
    cups = total_rotations/cup_movements
    gallons = cups/16
    liters = gallons*3.78541
    now = datetime.datetime.now()
    print('{}: Movements: {}. \nCups: {}. \nGallons: {}. \nLiters: {}'.format(now,total_rotations,cups,gallons,liters))
    # timestamp first, then every measurement rounded for storage
    data.append((
        now,
        round(total_rotations,2),
        round(cups,2),
        round(gallons,2),
        round(liters,2),
    ))
    print(f"datos: {data}")
    return commit_data(data)
| 52 | 81 |
import RPi.GPIO as GPIO
import time,sys, datetime, json, requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
'''
Configure raspberry
'''
GPIO.setmode(GPIO.BCM)
inpt = 13  # BCM pin number the flow sensor is wired to
GPIO.setup(inpt,GPIO.IN)
'''
Configure some global variables
'''
current_input = GPIO.input(inpt) # This is used to compare to the new_input later.
total_rotations = 0 # This is a counter. It gets reset after the number of seconds in rotation_downtime.
cup_movements = 200 # This is how many rotations occur as a cup of liquid passes through.
rotation_downtime = 5 # Sets the cut-off time for establishing a water-flow event.
last_movement_time = time.time() + rotation_downtime # This is used to determine if a new water-flow event should be created.
record_data = False # A flag used to trigger database insert.
data = [] # Queue of event tuples not yet delivered to the server.
print('Control C to exit')
def commit_data(data):
    '''
    Send the oldest queued water-flow event to the Node-RED endpoint as JSON.

    On success the queue is emptied; on a connection problem the queue is
    returned unchanged so prep_and_send() can retry it on the next event.
    '''
    url = 'http://localhost:1880/sensor'
    headers = {
        'Accepts': 'application/json'
    }
    print(f"1: {data[0]}")
    # indices 1..4 skip the timestamp stored at data[0][0]
    send_jsn = json.dumps({"Movements": data[0][1], "Cups": data[0][2], "Gallons": data[0][3], "Liters": data[0][4]})
    try:
        response = requests.post(url, data=send_jsn, headers=headers)
        print(response.text)
    except (ConnectionError, Timeout, TooManyRedirects) as e:
        print(e)
        # BUGFIX: the queue used to be cleared even when the POST failed,
        # losing the data that prep_and_send() documents as being saved for
        # a retry on the next water-flow event.  Keep it instead.
        return data
    return []
def prep_and_send(data,total_rotations):
    '''
    Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list.
    It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send
    the list of data-tuples the next time there is a water-flow event.
    Once the connection is successful data is emptied in commit_data().
    '''
    # derive the volume measurements from the raw rotation count
    cups = total_rotations/cup_movements
    gallons = cups/16
    liters = gallons*3.78541
    now = datetime.datetime.now()
    print('{}: Movements: {}. \nCups: {}. \nGallons: {}. \nLiters: {}'.format(now,total_rotations,cups,gallons,liters))
    # timestamp first, then every measurement rounded for storage
    data.append((
        now,
        round(total_rotations,2),
        round(cups,2),
        round(gallons,2),
        round(liters,2),
    ))
    print(f"datos: {data}")
    return commit_data(data)
while True:
    '''
    This is what actually runs the whole time.
    It first checks to see if new_input is different from current_input. This would be the case if there was a rotation.
    Once it detects that the input is different it knows water is flowing.
    It starts tracking the total_rotations and when the last rotation occured.
    After each rotation it refreshes the value of the last rotation time.
    It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped.
    Once the water stops it passes the total_rotations to prep_and_send().
    It also passes 'data' which is any previous water-flow events that were not successfully sent at the time they were recorded.
    '''
    new_input = GPIO.input(inpt)
    if new_input != current_input:
        # the sensor toggled: one rotation happened
        total_rotations += 1
        if time.time() <= last_movement_time: #if it hasn't been more than 10 seconds
            record_data = True
            current_input = new_input
            last_movement_time = time.time() + rotation_downtime
        else: #flow starts
            last_movement_time = time.time() + rotation_downtime
    elif record_data == True and time.time() > last_movement_time: #if it's been x seconds since last change
        # flow stopped: persist the event and reset the counters
        data = prep_and_send(data,total_rotations)
        record_data = False
        total_rotations = 0
        last_movement_time = time.time() + rotation_downtime
    current_input = new_input
    try:
        None
        #print('New input: ',new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)
    except KeyboardInterrupt:
        # NOTE(review): this handler can never fire -- the try body is just
        # ``None``, so a Ctrl-C raised elsewhere in the loop is not caught
        # here.  Consider wrapping the whole loop body instead; confirm intent.
        print('\nCTRL C - Exiting nicely')
        GPIO.cleanup()
        sys.exit()
|
count_samples
|
Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
# MASKED: count_samples function (lines 32-52)
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    weights = logw
    if weights is None:
        weights = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # evidence is the sum of the weights; sum in log space for stability
    return scipy.special.logsumexp(weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Note: this returns the evidence itself, not its log (the docstring
    previously said :math:`\log \mathcal{Z}`, which was incorrect).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # sum the weights in log space, exponentiating only once at the end
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index (column of ns_run['theta']) of the parameter to average.
    handle_indexerror: bool, optional
        Return nan instead of raising IndexError when param_ind >= ndim;
        useful when applying one estimator list to runs of mixed dimension.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # shift by the max log weight to avoid overflow when exponentiating
    rel_weights = np.exp(logw - logw.max())
    try:
        column = ns_run['theta'][:, param_ind]
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
    return np.sum(rel_weights * column) / np.sum(rel_weights)
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
| 32 | 52 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
logz
|
Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
# MASKED: logz function (lines 55-75)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
| 55 | 75 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the radial coordinate (magnitude
    of the theta vector).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights shifted by the max log weight (overflow-safe).
    rel_weights = np.exp(logw - np.max(logw))
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return weighted_quantile(probability, radii, rel_weights)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """Produce a latex formatted name for each function for use in
    labelling results.

    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    func = func_in
    if isinstance(func_in, functools.partial):
        # Unwrap the partial and merge its frozen keywords into kwargs.
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)  # accepted but irrelevant to label
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    # Credible-interval prefix: "median(" at the default probability,
    # otherwise "C.I._{p%}(" with trailing zeros stripped from p.
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$',
        'param_cred': cred_str + r'\theta_' + ind_str + ')$',
        'r_cred': cred_str + r'|\theta|)$'}
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """Estimate the quantile at the given probability from weighted
    samples, using linear interpolation.

    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).

    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_vals = values[order]
    sorted_wts = weights[order]
    # Plotting positions: cumulative weight up to each sample's midpoint,
    # normalised to lie in (0, 1).
    positions = np.cumsum(sorted_wts) - (0.5 * sorted_wts)
    positions /= np.sum(sorted_wts)
    return np.interp(probability, positions, sorted_vals)
|
evidence
|
Bayesian evidence :math:`\mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.

    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).

    Returns
    -------
    int
    """
    # Discard the standard estimator arguments if present; reject the rest.
    for ignored in ('logw', 'simulate'):
        kwargs.pop(ignored, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return len(ns_run['logl'])
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    log_weights = logw
    if log_weights is None:
        log_weights = nestcheck.ns_run_utils.get_logw(
            ns_run, simulate=simulate)
    # log Z = log(sum of weights), evaluated stably in log space.
    return scipy.special.logsumexp(log_weights)
# MASKED: evidence function (lines 78-98)
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Column of ns_run['theta'] holding the parameter of interest.
    handle_indexerror: bool, optional
        Return nan rather than raising an IndexError if
        param_ind >= ndim. Useful when applying the same list of
        estimators to data sets of different dimensions.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights shifted by the max log weight (overflow-safe).
    rel_weights = np.exp(logw - np.max(logw))
    try:
        column = ns_run['theta'][:, param_ind]
    except IndexError:
        if not handle_indexerror:
            raise
        return np.nan
    return np.sum(rel_weights * column) / np.sum(rel_weights)
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Column of ns_run['theta'] holding the parameter of interest.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights shifted by the max log weight (overflow-safe).
    rel_weights = np.exp(logw - np.max(logw))
    return weighted_quantile(
        probability, ns_run['theta'][:, param_ind], rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Note that unlike :func:`logz` this returns the evidence itself, not
    its natural log (the original docstring claimed
    :math:`\log \mathcal{Z}`, which contradicted the implementation).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Z = sum of weights; go via logsumexp for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))
| 78 | 98 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Note that unlike :func:`logz` this returns the evidence itself, not
    its natural log (the original docstring claimed
    :math:`\log \mathcal{Z}`, which contradicted the implementation).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Z = sum of weights; go via logsumexp for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
param_mean
|
Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Note that unlike :func:`logz` this returns the evidence itself, not
    its natural log (the original docstring claimed
    :math:`\log \mathcal{Z}`, which contradicted the implementation).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Z = sum of weights; go via logsumexp for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))
# MASKED: param_mean function (lines 101-138)
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Subtract the max log weight before exponentiating (overflow guard);
    # the shift cancels in the weighted-mean ratio below.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
| 101 | 138 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # 'logw' and 'simulate' are accepted (and ignored) so this estimator
    # shares a call signature with the weighted estimators.
    for ignored_key in ('logw', 'simulate'):
        kwargs.pop(ignored_key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = logw
    if log_weights is None:
        log_weights = nestcheck.ns_run_utils.get_logw(
            ns_run, simulate=simulate)
    # logsumexp computes log(sum(exp(logw))) without overflow.
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Note this returns the evidence itself rather than its natural log
    (use logz for :math:`\log \mathcal{Z}`), so it can overflow for runs
    with very large likelihoods.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space first to avoid overflow in the intermediate sum.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Subtract the max log weight before exponentiating (overflow guard);
    # the shift cancels in the weighted-mean ratio below.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight before exponentiating (overflow guard),
    # then normalise so the weights sum to one.
    rel_weights = np.exp(logw - logw.max())
    rel_weights = rel_weights / np.sum(rel_weights)
    return np.sum(rel_weights * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return np.sum(rel_weights * radii) / np.sum(rel_weights)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return weighted_quantile(probability, radii, rel_weights)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    if isinstance(func_in, functools.partial):
        func = func_in.func
        # A kwarg must not be specified both directly and via the partial.
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    # Credible-interval prefix: "median(" for the default probability,
    # otherwise "C.I._{p%}(" with the percentage trimmed of trailing zeros.
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$',
        'param_cred': cred_str + r'\theta_' + ind_str + ')$',
        'r_cred': cred_str + r'|\theta|)$'}
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_weights = weights[order]
    # Place each sample at the centre of its weight interval, then
    # normalise so the quantile positions lie in (0, 1).
    positions = np.cumsum(sorted_weights) - 0.5 * sorted_weights
    positions = positions / np.sum(weights)
    return np.interp(probability, positions, values[order])
|
param_cred
|
One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # 'logw' and 'simulate' are accepted (and ignored) so this estimator
    # shares a call signature with the weighted estimators.
    for ignored_key in ('logw', 'simulate'):
        kwargs.pop(ignored_key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = logw
    if log_weights is None:
        log_weights = nestcheck.ns_run_utils.get_logw(
            ns_run, simulate=simulate)
    # logsumexp computes log(sum(exp(logw))) without overflow.
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Note this returns the evidence itself rather than its natural log
    (use logz for :math:`\log \mathcal{Z}`), so it can overflow for runs
    with very large likelihoods.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space first to avoid overflow in the intermediate sum.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Subtract the max log weight before exponentiating (overflow guard);
    # the shift cancels in the weighted-mean ratio below.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
# MASKED: param_cred function (lines 141-173)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight before exponentiating (overflow guard),
    # then normalise so the weights sum to one.
    rel_weights = np.exp(logw - logw.max())
    rel_weights = rel_weights / np.sum(rel_weights)
    return np.sum(rel_weights * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return np.sum(rel_weights * radii) / np.sum(rel_weights)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return weighted_quantile(probability, radii, rel_weights)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    if isinstance(func_in, functools.partial):
        func = func_in.func
        # A kwarg must not be specified both directly and via the partial.
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    # Credible-interval prefix: "median(" for the default probability,
    # otherwise "C.I._{p%}(" with the percentage trimmed of trailing zeros.
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$',
        'param_cred': cred_str + r'\theta_' + ind_str + ')$',
        'r_cred': cred_str + r'|\theta|)$'}
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_weights = weights[order]
    # Place each sample at the centre of its weight interval, then
    # normalise so the quantile positions lie in (0, 1).
    positions = np.cumsum(sorted_weights) - 0.5 * sorted_weights
    positions = positions / np.sum(weights)
    return np.interp(probability, positions, values[order])
|
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
| 141 | 173 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # 'logw' and 'simulate' are accepted (and ignored) so this estimator
    # shares a call signature with the weighted estimators.
    for ignored_key in ('logw', 'simulate'):
        kwargs.pop(ignored_key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = logw
    if log_weights is None:
        log_weights = nestcheck.ns_run_utils.get_logw(
            ns_run, simulate=simulate)
    # logsumexp computes log(sum(exp(logw))) without overflow.
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Note this returns the evidence itself rather than its natural log
    (use logz for :math:`\log \mathcal{Z}`), so it can overflow for runs
    with very large likelihoods.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space first to avoid overflow in the intermediate sum.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Subtract the max log weight before exponentiating (overflow guard);
    # the shift cancels in the weighted-mean ratio below.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight before exponentiating (overflow guard),
    # then normalise so the weights sum to one.
    rel_weights = np.exp(logw - logw.max())
    rel_weights = rel_weights / np.sum(rel_weights)
    return np.sum(rel_weights * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return np.sum(rel_weights * radii) / np.sum(rel_weights)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_weights = np.exp(logw - logw.max())  # overflow guard
    radii = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return weighted_quantile(probability, radii, rel_weights)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    if isinstance(func_in, functools.partial):
        func = func_in.func
        # A kwarg must not be specified both directly and via the partial.
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    # Credible-interval prefix: "median(" for the default probability,
    # otherwise "C.I._{p%}(" with the percentage trimmed of trailing zeros.
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$',
        'param_cred': cred_str + r'\theta_' + ind_str + ')$',
        'r_cred': cred_str + r'|\theta|)$'}
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_weights = weights[order]
    # Place each sample at the centre of its weight interval, then
    # normalise so the quantile positions lie in (0, 1).
    positions = np.cumsum(sorted_weights) - 0.5 * sorted_weights
    positions = positions / np.sum(weights)
    return np.interp(probability, positions, values[order])
|
param_squared_mean
|
Mean of the square of a single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is not None:
        # Numerically stable log(sum(exp(logw))).
        return scipy.special.logsumexp(logw)
    return scipy.special.logsumexp(
        nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate))
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Note this returns the evidence itself, not its logarithm; use logz
    for :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space before exponentiating for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight to protect against overflow in exp.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights; shifting by the max avoids overflow in exp.
    rel_weights = np.exp(logw - np.max(logw))
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
# MASKED: param_squared_mean function (lines 176-203)
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights; shifting by the max avoids overflow in exp.
    rel_weights = np.exp(logw - np.max(logw))
    # |theta| of each sample.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.average(radii, weights=rel_weights)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights; shifting by the max avoids overflow in exp.
    rel_weights = np.exp(logw - np.max(logw))
    radii = np.sqrt((ns_run['theta'] ** 2).sum(axis=1))
    return weighted_quantile(probability, radii, rel_weights)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """Produce a latex formatted name for each function for use in
    labelling results.

    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Identify the underlying function and merge in any partial keywords.
    if isinstance(func_in, functools.partial):
        target = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        target = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # 1-based parameter index shown under a hat, e.g. {\hat{1}}.
    sub = r'{\hat{' + str(param_ind + 1) + '}}'
    # Prefix for the credible-interval names.
    if probability == 0.5:
        cred_prefix = r'$\mathrm{median}('
    else:
        # Format the percentage without trailing zeros.
        pc = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_prefix = r'$\mathrm{C.I.}_{' + pc + r'\%}('
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + sub + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + sub + '}$',
        'param_cred': cred_prefix + r'\theta_' + sub + ')$',
        'r_cred': cred_prefix + r'|\theta|)$'}
    try:
        return names[target.__name__]
    except KeyError as err:
        # Augment the KeyError so the unsupported function is named.
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               target.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """Get quantile estimate for input probability given weighted samples
    using linear interpolation.

    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).

    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    ordered_vals = values[order]
    ordered_weights = weights[order]
    # Empirical CDF evaluated at the midpoint of each weight step.
    cdf = np.cumsum(ordered_weights) - (0.5 * ordered_weights)
    cdf /= np.sum(ordered_weights)
    return np.interp(probability, cdf, ordered_vals)
|
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Normalised relative weights; shifting by the max avoids overflow.
    weights = np.exp(logw - np.max(logw))
    weights /= weights.sum()
    return np.dot(weights, ns_run['theta'][:, param_ind] ** 2)
| 176 | 203 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # Discard the standard estimator kwargs so this function shares the
    # common estimator call signature.
    kwargs.pop('logw', None)
    kwargs.pop('simulate', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]


def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Numerically stable log(sum(exp(logw))).
    return scipy.special.logsumexp(logw)


def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}` (not its logarithm).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space before exponentiating for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))


def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight to protect against overflow in exp.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise


def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    return weighted_quantile(probability, ns_run['theta'][:, param_ind],
                             w_relative)


def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    # Normalise the weights before the weighted sum of squares.
    w_relative /= np.sum(w_relative)
    return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))


def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight to protect against overflow in exp.
    w_relative = np.exp(logw - logw.max())
    # Radial coordinate |theta| of each sample.
    r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return np.sum(w_relative * r) / np.sum(w_relative)


def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects and merge their preset keywords.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # 1-based parameter index shown under a hat, e.g. {\hat{1}}.
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
    # Add credible interval names
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        # format percent without trailing zeros
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
    latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        # Augment the KeyError so the unsupported function is named.
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise


def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    sorted_inds = np.argsort(values)
    # Empirical CDF evaluated at the midpoint of each weight step.
    quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
    quantiles /= np.sum(weights)
    return np.interp(probability, quantiles, values[sorted_inds])
|
r_mean
|
Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # Discard the standard estimator kwargs so this function shares the
    # common estimator call signature.
    kwargs.pop('logw', None)
    kwargs.pop('simulate', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]


def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Numerically stable log(sum(exp(logw))).
    return scipy.special.logsumexp(logw)


def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}` (not its logarithm).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum in log space before exponentiating for numerical stability.
    return np.exp(scipy.special.logsumexp(logw))


def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the max log weight to protect against overflow in exp.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise


def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    return weighted_quantile(probability, ns_run['theta'][:, param_ind],
                             w_relative)


def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    # Normalise the weights before the weighted sum of squares.
    w_relative /= np.sum(w_relative)
    return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))


# MASKED: r_mean function (lines 206-228)


def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
    return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects and merge their preset keywords.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # 1-based parameter index shown under a hat, e.g. {\hat{1}}.
    ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
    latex_name_dict = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
    # Add credible interval names
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        # format percent without trailing zeros
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
    latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
    try:
        return latex_name_dict[func.__name__]
    except KeyError as err:
        # Augment the KeyError so the unsupported function is named.
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise


def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 1 > probability > 0, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    sorted_inds = np.argsort(values)
    # Empirical CDF evaluated at the midpoint of each weight step.
    quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
    quantiles /= np.sum(weights)
    return np.interp(probability, quantiles, values[sorted_inds])
|
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Relative weights; shifting by the max avoids overflow in exp.
    rel_weights = np.exp(logw - np.max(logw))
    # |theta| of each sample.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.average(radii, weights=rel_weights)
| 206 | 228 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    theta_col = ns_run['theta'][:, param_ind]
    # Weighted average of theta^2; dividing by the weight total normalises.
    return np.sum(rel_w * theta_col ** 2) / np.sum(rel_w)
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    # Euclidean norm of each sample's theta vector.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.sum(rel_w * radii) / np.sum(rel_w)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return weighted_quantile(probability, radii, rel_w)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects, folding their stored keywords in.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    subscript = r'{\hat{' + str(param_ind + 1) + '}}'
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + subscript + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + subscript + '}$'}
    # Credible-interval estimators share a prefix: "median" for the 50%
    # quantile, otherwise "C.I." with the percentage (no trailing zeros).
    if probability == 0.5:
        prefix = r'$\mathrm{median}('
    else:
        percent = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        prefix = r'$\mathrm{C.I.}_{' + percent + r'\%}('
    names['param_cred'] = prefix + r'\theta_' + subscript + ')$'
    names['r_cred'] = prefix + r'|\theta|)$'
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 0 < probability < 1, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_vals = values[order]
    sorted_wts = weights[order]
    # Empirical CDF evaluated at the midpoint of each sample's weight block.
    cdf = (np.cumsum(sorted_wts) - 0.5 * sorted_wts) / np.sum(sorted_wts)
    return np.interp(probability, cdf, sorted_vals)
|
r_cred
|
One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # Silently discard the standard estimator keyword arguments.
    for key in ('logw', 'simulate'):
        kwargs.pop(key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = (nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
                   if logw is None else logw)
    # logsumexp gives a numerically stable log(sum(exp(log_weights))).
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum the weights in log space (numerically stable) then exponentiate.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift log weights so their maximum is zero before exponentiating
    # (protects against overflow; the quantile is scale-invariant).
    rel_weights = np.exp(logw - np.max(logw))
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    theta_col = ns_run['theta'][:, param_ind]
    # Weighted average of theta^2; dividing by the weight total normalises.
    return np.sum(rel_w * theta_col ** 2) / np.sum(rel_w)
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    # Euclidean norm of each sample's theta vector.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.sum(rel_w * radii) / np.sum(rel_w)
# MASKED: r_cred function (lines 231-258)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects, folding their stored keywords in.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    subscript = r'{\hat{' + str(param_ind + 1) + '}}'
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + subscript + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + subscript + '}$'}
    # Credible-interval estimators share a prefix: "median" for the 50%
    # quantile, otherwise "C.I." with the percentage (no trailing zeros).
    if probability == 0.5:
        prefix = r'$\mathrm{median}('
    else:
        percent = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        prefix = r'$\mathrm{C.I.}_{' + percent + r'\%}('
    names['param_cred'] = prefix + r'\theta_' + subscript + ')$'
    names['r_cred'] = prefix + r'|\theta|)$'
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 0 < probability < 1, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_vals = values[order]
    sorted_wts = weights[order]
    # Empirical CDF evaluated at the midpoint of each sample's weight block.
    cdf = (np.cumsum(sorted_wts) - 0.5 * sorted_wts) / np.sum(sorted_wts)
    return np.interp(probability, cdf, sorted_vals)
|
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return weighted_quantile(probability, radii, rel_w)
| 231 | 258 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # Silently discard the standard estimator keyword arguments.
    for key in ('logw', 'simulate'):
        kwargs.pop(key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = (nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
                   if logw is None else logw)
    # logsumexp gives a numerically stable log(sum(exp(log_weights))).
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum the weights in log space (numerically stable) then exponentiate.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift log weights so their maximum is zero before exponentiating
    # (protects against overflow; the quantile is scale-invariant).
    rel_weights = np.exp(logw - np.max(logw))
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    theta_col = ns_run['theta'][:, param_ind]
    # Weighted average of theta^2; dividing by the weight total normalises.
    return np.sum(rel_w * theta_col ** 2) / np.sum(rel_w)
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    # Euclidean norm of each sample's theta vector.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.sum(rel_w * radii) / np.sum(rel_w)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return weighted_quantile(probability, radii, rel_w)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
    """
    Produce a latex formatted name for each function for use in labelling
    results.
    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.
    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects, folding their stored keywords in.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    subscript = r'{\hat{' + str(param_ind + 1) + '}}'
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + subscript + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + subscript + '}$'}
    # Credible-interval estimators share a prefix: "median" for the 50%
    # quantile, otherwise "C.I." with the percentage (no trailing zeros).
    if probability == 0.5:
        prefix = r'$\mathrm{median}('
    else:
        percent = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        prefix = r'$\mathrm{C.I.}_{' + percent + r'\%}('
    names['param_cred'] = prefix + r'\theta_' + subscript + ')$'
    names['r_cred'] = prefix + r'|\theta|)$'
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
def weighted_quantile(probability, values, weights):
    """
    Get quantile estimate for input probability given weighted samples using
    linear interpolation.
    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).
    Returns
    -------
    quantile: float
    """
    assert 0 < probability < 1, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_vals = values[order]
    sorted_wts = weights[order]
    # Empirical CDF evaluated at the midpoint of each sample's weight block.
    cdf = (np.cumsum(sorted_wts) - 0.5 * sorted_wts) / np.sum(sorted_wts)
    return np.interp(probability, cdf, sorted_vals)
|
get_latex_name
|
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.
    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    Returns
    -------
    int
    """
    # Silently discard the standard estimator keyword arguments.
    for key in ('logw', 'simulate'):
        kwargs.pop(key, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    log_weights = (nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
                   if logw is None else logw)
    # logsumexp gives a numerically stable log(sum(exp(log_weights))).
    return scipy.special.logsumexp(log_weights)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum the weights in log space (numerically stable) then exponentiate.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    w_relative = np.exp(logw - logw.max())  # protect against overflow
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift log weights so their maximum is zero before exponentiating
    # (protects against overflow; the quantile is scale-invariant).
    rel_weights = np.exp(logw - np.max(logw))
    samples = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, samples, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of single parameter (second moment of its
    posterior distribution).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    theta_col = ns_run['theta'][:, param_ind]
    # Weighted average of theta^2; dividing by the weight total normalises.
    return np.sum(rel_w * theta_col ** 2) / np.sum(rel_w)
def r_mean(ns_run, logw=None, simulate=False):
    """Mean of the radial coordinate (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    # Euclidean norm of each sample's theta vector.
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return np.sum(rel_w * radii) / np.sum(rel_w)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
    """One-tailed credible interval on the value of the radial coordinate
    (magnitude of theta vector).
    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    rel_w = np.exp(logw - np.max(logw))  # protect against overflow
    radii = np.linalg.norm(ns_run['theta'], axis=1)
    return weighted_quantile(probability, radii, rel_w)
# Helper functions
# ----------------
# MASKED: get_latex_name function (lines 265-316)
def weighted_quantile(probability, values, weights):
    """Estimate the quantile at *probability* from weighted samples,
    using linear interpolation between order statistics.

    Parameters
    ----------
    probability: float
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile.
    values: 1d numpy array
        Sample values.
    weights: 1d numpy array
        Corresponding sample weights (same shape as values).

    Returns
    -------
    quantile: float
    """
    assert 0 < probability < 1, (
        'credible interval prob= ' + str(probability) + ' not in (0, 1)')
    assert values.shape == weights.shape
    assert values.ndim == 1
    assert weights.ndim == 1
    order = np.argsort(values)
    sorted_vals = values[order]
    sorted_wts = weights[order]
    # Midpoint rule: each sample sits at the centre of its weight mass.
    cdf = np.cumsum(sorted_wts) - 0.5 * sorted_wts
    cdf /= np.sum(sorted_wts)
    return np.interp(probability, cdf, sorted_vals)
|
def get_latex_name(func_in, **kwargs):
    """Produce a latex formatted name for an estimator function, for
    use when labelling results.

    Parameters
    ----------
    func_in: function
        Estimator function, or a functools.partial wrapping one.
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    if isinstance(func_in, functools.partial):
        func = func_in.func
        # Kwargs given both here and in the partial would be ambiguous.
        overlap = set(func_in.keywords) & set(kwargs)
        assert not overlap, (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # 1-based parameter index shown with a hat, e.g. {\hat{1}}.
    ind_str = r'{{\hat{{{0}}}}}'.format(param_ind + 1)
    # Prefix for the credible-interval estimators.
    if probability == 0.5:
        cred_str = r'$\mathrm{median}('
    else:
        # Format the percentage without trailing zeros.
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + ind_str + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$',
        'param_cred': cred_str + r'\theta_' + ind_str + ')$',
        'r_cred': cred_str + r'|\theta|)$'}
    try:
        return names[func.__name__]
    except KeyError as err:
        err.args = err.args + ('get_latex_name not yet set up for ' +
                               func.__name__,)
        raise
| 265 | 316 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
    r"""Number of samples in run.

    Unlike most estimators this does not require log weights, but for
    convenience will not throw an error if they are specified.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).

    Returns
    -------
    int
    """
    # Silently accept (and discard) the standard estimator kwargs.
    for ignored in ('logw', 'simulate'):
        kwargs.pop(ignored, None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
    r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Stable log-sum-exp of the sample log weights.
    return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
    r"""Bayesian evidence :math:`\mathcal{Z}`.

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Sum the weights in log space before exponentiating so individual
    # weights cannot overflow or underflow.
    return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
               handle_indexerror=False):
    """Mean of a single parameter (single component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the mean should be calculated. This
        corresponds to the column of ns_run['theta'] which contains the
        parameter.
    handle_indexerror: bool, optional
        Make the function return nan rather than raising an
        IndexError if param_ind >= ndim. This is useful when applying
        the same list of estimators to data sets of different dimensions.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the maximum log weight so exponentiation cannot overflow.
    w_relative = np.exp(logw - logw.max())
    try:
        return (np.sum(w_relative * ns_run['theta'][:, param_ind])
                / np.sum(w_relative))
    except IndexError:
        if handle_indexerror:
            return np.nan
        else:
            raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
               param_ind=0):
    """One-tailed credible interval on the value of a single parameter
    (component of theta).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    probability: float, optional
        Quantile to estimate - must be in open interval (0, 1).
        For example, use 0.5 for the median and 0.84 for the upper
        84% quantile. Passed to weighted_quantile.
    param_ind: int, optional
        Index of parameter for which the credible interval should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the maximum log weight so exponentiation cannot overflow.
    rel_weights = np.exp(logw - logw.max())
    param_values = ns_run['theta'][:, param_ind]
    return weighted_quantile(probability, param_values, rel_weights)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
    """Mean of the square of a single parameter (second moment of its
    posterior distribution).

    Parameters
    ----------
    ns_run: dict
        Nested sampling run dict (see the data_processing module
        docstring for more details).
    logw: None or 1d numpy array, optional
        Log weights of samples.
    simulate: bool, optional
        Passed to ns_run_utils.get_logw if logw needs to be
        calculated.
    param_ind: int, optional
        Index of parameter for which the second moment should be
        calculated. This corresponds to the column of ns_run['theta']
        which contains the parameter.

    Returns
    -------
    float
    """
    if logw is None:
        logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
    # Shift by the maximum log weight so exponentiation cannot overflow.
    rel_weights = np.exp(logw - logw.max())
    squares = ns_run['theta'][:, param_ind] ** 2
    # np.average normalises the weights, matching an explicit division
    # by their sum.
    return np.average(squares, weights=rel_weights)
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
weighted_quantile
|
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
|
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
# MASKED: weighted_quantile function (lines 319-347)
|
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
| 319 | 347 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
Make the function function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
|
_is_wellformed_user_properties
|
Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
|
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
    """Build a CREATE TEMPORARY TRIGGER statement that validates a
    TEXT_JSON *column* of *table* on INSERT or UPDATE.

    The generated trigger aborts the statement when the new value is
    non-NULL and not wellformed JSON. Validation uses SQLite's native
    json_valid() when the JSON1 extension is enabled, otherwise the
    registered is_wellformed_json() fallback.

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    action = insert_or_update.upper()
    if action not in {'INSERT', 'UPDATE'}:
        raise ValueError(
            f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}")

    check_function = 'json_valid' if SQLITE_JSON1_ENABLED else 'is_wellformed_json'
    when_clause = f"""
        NEW.{column} IS NOT NULL
        AND {check_function}(NEW.{column}) = 0
    """.rstrip()

    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {action} ON main.{table} FOR EACH ROW
        WHEN{when_clause}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
        END;
    '''
# MASKED: _is_wellformed_user_properties function (lines 213-230)
def _make_trigger_for_user_properties(insert_or_update, table, column):
    """Build a CREATE TEMPORARY TRIGGER statement that validates a
    TEXT_USERPROPERTIES *column* of *table* before values are saved.

    A wellformed TEXT_USERPROPERTIES value is a string containing a
    JSON formatted object. The generated trigger aborts the statement
    when the new value is non-NULL and not a wellformed JSON object.

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    # Validate the action up front (consistent with _make_trigger_for_json
    # and _make_trigger_for_attributes); the original silently produced a
    # malformed SQL statement for a bad argument.
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    if SQLITE_JSON1_ENABLED:
        user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
    else:
        user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'

    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
        WHEN
            NEW.{column} IS NOT NULL
            AND {user_properties_check}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
        END;
    '''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
    """Build a CREATE TEMPORARY TRIGGER statement that validates a
    TEXT_ATTRIBUTES *column* of *table* on INSERT or UPDATE.

    The generated trigger passes only for wellformed JSON "objects"
    whose member values are all "text". It aborts when the new value
    is:

    * not wellformed JSON
    * not an "object" type
    * an "object" containing one or more "integer", "real", "true",
      "false", "null", "object" or "array" members

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    action = insert_or_update.upper()
    if action not in {'INSERT', 'UPDATE'}:
        raise ValueError(
            f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}")

    if SQLITE_JSON1_ENABLED:
        # Native JSON1: scan the object's members with json_each().
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND (json_valid(NEW.{column}) = 0
            OR json_type(NEW.{column}) != 'object'
            OR (SELECT COUNT(*)
                FROM json_each(NEW.{column})
                WHERE json_each.type != 'text') != 0)
    """.rstrip()
    else:
        # Fallback: the registered Python function does the full check.
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND is_wellformed_attributes(NEW.{column}) = 0
    """.rstrip()

    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {action} ON main.{table} FOR EACH ROW
        WHEN{when_clause}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
        END;
    '''
def _add_functions_and_triggers(connection):
    """Register application-defined SQL functions and create the
    temporary validation triggers on *connection*.

    Note: This function must not be executed on an empty connection.
    The table schema must exist before triggers can be created.
    """
    if not SQLITE_JSON1_ENABLED:
        fallback_functions = [
            ('is_wellformed_json', _is_wellformed_json),
            ('is_wellformed_user_properties', _is_wellformed_user_properties),
            ('is_wellformed_attributes', _is_wellformed_attributes),
        ]
        try:
            # The *deterministic* flag is only accepted on newer
            # Python/SQLite combinations.
            for name, func in fallback_functions:
                connection.create_function(name, 1, func, deterministic=True)
        except TypeError:
            for name, func in fallback_functions:
                connection.create_function(name, 1, func)

    attribute_columns = [
        ('edge', 'type_info'),
        ('quantity', 'attributes'),
        ('weight', 'type_info'),
    ]
    for action in ('INSERT', 'UPDATE'):
        connection.execute(_make_trigger_for_json(action, 'property', 'value'))
        connection.execute(
            _make_trigger_for_user_properties(action, 'edge', 'user_properties'))
        for table, column in attribute_columns:
            connection.execute(_make_trigger_for_attributes(action, table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
    """Return a sqlite3 connection to the Toron node file at *path*.

    When *path* does not exist (and *mode* permits creation), a new
    node file is created and initialized with the Toron schema.

    Raises:
        ToronError: if the file cannot be opened, is not a Toron node,
            or uses an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'

    # Must check existence *before* connecting--connecting in 'rwc'
    # mode creates the file as a side effect.
    is_new_file = not os.path.exists(path)
    try:
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,
            uri=True,
        )
        if is_new_file:
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg) from err

    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as err:
        # OperationalError: *path* is a database with an unknown schema.
        # DatabaseError: *path* is a file but not a SQLite database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}') from err

    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()

    if schema_version != 1:  # When schema version is unsupported.
        con.close()  # <- Original leaked the connection on this path.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)

    return con
# Module-wide source of unique SAVEPOINT names ('svpnt0', 'svpnt1', ...);
# sharing a single counter keeps nested savepoint() instances from
# colliding on the same name.
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
    """Context manager that wraps a block of code in a SQLite SAVEPOINT.

    If the block exits without errors, the savepoint is released and
    the changes are committed. If an error occurs, all of the changes
    made inside the block are rolled back::

        cur = con.cursor()
        with savepoint(cur):
            cur.execute(...)

    The cursor's connection must be in "autocommit" mode
    (``isolation_level=None``); otherwise OperationalError is raised.
    """
    def __init__(self, cursor):
        isolation_level = cursor.connection.isolation_level
        if isolation_level is not None:
            msg = (
                f'isolation_level must be None, got: {isolation_level!r}\n'
                '\n'
                'For explicit transaction handling, the connection must '
                'be operating in "autocommit" mode. Turn on autocommit '
                'mode by setting "con.isolation_level = None".'
            )
            raise sqlite3.OperationalError(msg)
        self.name = next(_SAVEPOINT_NAME_GENERATOR)
        self.cursor = cursor

    def __enter__(self):
        self.cursor.execute(f'SAVEPOINT {self.name}')

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release (commit) on clean exit, roll back on any exception.
        statement = 'RELEASE' if exc_type is None else 'ROLLBACK TO'
        self.cursor.execute(f'{statement} {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
    """Yield a cursor that runs inside an isolated transaction.

    *path_or_connection* may be an already-open sqlite3.Connection or
    a path to a Toron node file. If the context manager exits without
    errors, the transaction is committed; if an exception is raised,
    all changes are rolled back.
    """
    caller_owns_connection = isinstance(path_or_connection, sqlite3.Connection)
    if caller_owns_connection:
        connection = path_or_connection
    else:
        connection = connect(path_or_connection, mode=mode)
    cursor = connection.cursor()
    try:
        with savepoint(cursor):
            yield cursor
    finally:
        cursor.close()
        if not caller_owns_connection:
            # Only close connections opened here; a caller-supplied
            # connection stays open for further use.
            connection.close()
|
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
| 213 | 230 |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
    """Build a CREATE TEMPORARY TRIGGER statement that validates a
    TEXT_USERPROPERTIES *column* of *table* before values are saved.

    A wellformed TEXT_USERPROPERTIES value is a string containing a
    JSON formatted object. The generated trigger aborts the statement
    when the new value is non-NULL and not a wellformed JSON object.

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    # Validate the action up front (consistent with _make_trigger_for_json
    # and _make_trigger_for_attributes); the original silently produced a
    # malformed SQL statement for a bad argument.
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    if SQLITE_JSON1_ENABLED:
        user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
    else:
        user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'

    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
        WHEN
            NEW.{column} IS NOT NULL
            AND {user_properties_check}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
        END;
    '''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
    """Return a sqlite3 connection to the Toron node file at *path*.

    When *path* does not exist (and *mode* permits creation), a new
    node file is created and initialized with the Toron schema.

    Raises:
        ToronError: if the file cannot be opened, is not a Toron node,
            or uses an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'

    # Must check existence *before* connecting--connecting in 'rwc'
    # mode creates the file as a side effect.
    is_new_file = not os.path.exists(path)
    try:
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,
            uri=True,
        )
        if is_new_file:
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg) from err

    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as err:
        # OperationalError: *path* is a database with an unknown schema.
        # DatabaseError: *path* is a file but not a SQLite database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}') from err

    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()

    if schema_version != 1:  # When schema version is unsupported.
        con.close()  # <- Original leaked the connection on this path.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)

    return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
If the block exists without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = lambda: None # Don't close already-existing cursor.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
|
_is_wellformed_attributes
|
Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
|
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
# MASKED: _is_wellformed_attributes function (lines 260-279)
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
    """Register application-defined functions and create the temporary
    validation triggers on *connection*.

    Note: This function must not be executed on an empty connection.
    The table schema must exist before triggers can be created.
    """
    if not SQLITE_JSON1_ENABLED:
        functions = [
            ('is_wellformed_json', _is_wellformed_json),
            ('is_wellformed_user_properties', _is_wellformed_user_properties),
            ('is_wellformed_attributes', _is_wellformed_attributes),
        ]
        try:
            # The *deterministic* keyword needs Python 3.8 or newer.
            for name, func in functions:
                connection.create_function(name, 1, func, deterministic=True)
        except TypeError:
            # Fall back to plain registration on older interpreters.
            for name, func in functions:
                connection.create_function(name, 1, func)
    for action in ('INSERT', 'UPDATE'):
        connection.execute(_make_trigger_for_json(action, 'property', 'value'))
        connection.execute(_make_trigger_for_user_properties(action, 'edge', 'user_properties'))
    attribute_columns = [
        ('edge', 'type_info'),
        ('quantity', 'attributes'),
        ('weight', 'type_info'),
    ]
    for table, column in attribute_columns:
        for action in ('INSERT', 'UPDATE'):
            connection.execute(_make_trigger_for_attributes(action, table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
    """Returns a sqlite3 connection to a Toron node file.

    *mode* is passed through as SQLite's URI ``mode`` query parameter
    (the default 'rwc' creates the file when it does not exist).

    Raises ToronError when *path* cannot be opened, is not a Toron
    node database, or declares an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'
    try:
        get_connection = lambda: sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,  # Autocommit; savepoints handle transactions.
            uri=True,
        )
        if os.path.exists(path):
            con = get_connection()
        else:
            con = get_connection()
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        # Reword SQLite's message so it refers to the node file.
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg)
    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError):
        # Raises OperationalError when *path* is a database with an unknown
        # schema and DatabaseError when *path* is a file but not a database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}')
    # TEXT_JSON converter parses the stored '1' into the integer 1.
    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()
    if schema_version != 1:  # When schema version is unsupported.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)
    return con
# Infinite stream of unique savepoint names: 'svpnt0', 'svpnt1', ...
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
    """Context manager wrapping a block of code inside a SAVEPOINT.

    On a clean exit the SAVEPOINT is released and the changes are
    committed; if an exception escapes the block, all of the changes
    are rolled back:

        cur = con.cursor()
        with savepoint(cur):
            cur.execute(...)
    """
    def __init__(self, cursor):
        isolation_level = cursor.connection.isolation_level
        if isolation_level is not None:
            msg = (
                f'isolation_level must be None, got: {isolation_level!r}\n'
                '\n'
                'For explicit transaction handling, the connection must '
                'be operating in "autocommit" mode. Turn on autocommit '
                'mode by setting "con.isolation_level = None".'
            )
            raise sqlite3.OperationalError(msg)
        self.name = next(_SAVEPOINT_NAME_GENERATOR)
        self.cursor = cursor
    def __enter__(self):
        self.cursor.execute(f'SAVEPOINT {self.name}')
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.cursor.execute(f'RELEASE {self.name}')
        else:
            self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
    """Yield a cursor running inside an isolated transaction.

    If the context manager exits without errors, the transaction is
    committed. If an exception is raised, all changes are rolled back.
    """
    if isinstance(path_or_connection, sqlite3.Connection):
        # Reuse the caller's connection and leave it open afterwards.
        connection = path_or_connection
        close_when_done = False
    else:
        connection = connect(path_or_connection, mode=mode)
        close_when_done = True
    cursor = connection.cursor()
    try:
        with savepoint(cursor):
            yield cursor
    finally:
        cursor.close()
        if close_when_done:
            connection.close()
|
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
| 260 | 279 |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
"""Returns a sqlite3 connection to a Toron node file."""
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
try:
get_connection = lambda: sqlite3.connect(
database=uri_path,
detect_types=sqlite3.PARSE_DECLTYPES,
isolation_level=None,
uri=True,
)
if os.path.exists(path):
con = get_connection()
else:
con = get_connection()
con.executescript(_schema_script) # Create database schema.
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
# Raises OperationalError when *path* is a database with an unknown
# schema and DatabaseError when *path* is a file but not a database.
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
schema_version, *_ = cur.fetchone() or (None,)
cur.close()
if schema_version != 1: # When schema version is unsupported.
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
If the block exists without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = lambda: None # Don't close already-existing cursor.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
|
_path_to_sqlite_uri
|
Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
|
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
    """Return a SQL statement for creating a temporary trigger. The
    trigger is used to validate the contents of TEXT_ATTRIBUTES
    type columns.

    The trigger will pass without error if the JSON is a wellformed
    "object" containing "text" values.

    The trigger will raise an error if the value is:

      * not wellformed JSON
      * not an "object" type
      * an "object" type that contains one or more "integer", "real",
        "true", "false", "null", "object" or "array" types

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    if SQLITE_JSON1_ENABLED:
        # JSON1 available: check wellformedness, object-ness, and member
        # types directly in SQL via json_valid()/json_type()/json_each().
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND (json_valid(NEW.{column}) = 0
             OR json_type(NEW.{column}) != 'object'
             OR (SELECT COUNT(*)
                 FROM json_each(NEW.{column})
                 WHERE json_each.type != 'text') != 0)
    """.rstrip()
    else:
        # No JSON1: defer to the application-defined SQL function
        # is_wellformed_attributes() registered in _add_functions_and_triggers().
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND is_wellformed_attributes(NEW.{column}) = 0
    """.rstrip()

    return f'''
    CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
    BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
    WHEN{when_clause}
    BEGIN
        SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
    END;
'''
def _add_functions_and_triggers(connection):
    """Create triggers and application-defined functions on *connection*.

    Note: This function must not be executed on an empty connection.
    The table schema must exist before triggers can be created.
    """
    if not SQLITE_JSON1_ENABLED:
        # Without JSON1, register Python fallbacks for the SQL functions
        # that the validation triggers below rely on.
        try:
            # deterministic=True lets SQLite treat the functions as pure;
            # the keyword is not accepted by older Python versions.
            connection.create_function(
                'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
            connection.create_function(
                'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
            connection.create_function(
                'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
        except TypeError:
            # Fallback: re-register all three without the keyword.
            connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
            connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
            connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)

    # TEXT_JSON column: any wellformed JSON is allowed.
    connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
    connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))

    # TEXT_USERPROPERTIES column: must be a JSON "object".
    connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
    connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))

    # TEXT_ATTRIBUTES columns: must be flat JSON objects with text values.
    jsonflatobj_columns = [
        ('edge', 'type_info'),
        ('quantity', 'attributes'),
        ('weight', 'type_info'),
    ]
    for table, column in jsonflatobj_columns:
        connection.execute(_make_trigger_for_attributes('INSERT', table, column))
        connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
# MASKED: _path_to_sqlite_uri function (lines 361-383)
def connect(path, mode='rwc'):
    """Return a sqlite3 connection to the Toron node file at *path*.

    If *path* does not exist (and *mode* allows creation), a new node
    file is created and initialized with the expected schema.

    Raises:
        ToronError: if the file cannot be opened, is not a Toron node,
            or uses an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'

    # Check existence *before* connecting--connecting in 'rwc' mode
    # creates the file as a side effect.
    is_new_file = not os.path.exists(path)
    try:
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,
            uri=True,
        )
        if is_new_file:
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg) from err  # Chain the cause for debugging.

    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as err:
        # Raises OperationalError when *path* is a database with an unknown
        # schema and DatabaseError when *path* is a file but not a database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}') from err

    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()

    if schema_version != 1:  # When schema version is unsupported.
        con.close()  # Don't leak the connection on the failure path.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)

    return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
    """Context manager wrapping a block of code inside a SAVEPOINT.

    If the block exits without errors, the SAVEPOINT is released and
    the changes are committed. If an error occurs, all of the changes
    are rolled back:

        cur = con.cursor()
        with savepoint(cur):
            cur.execute(...)
    """
    def __init__(self, cursor):
        isolation_level = cursor.connection.isolation_level
        if isolation_level is not None:
            # Explicit transaction handling requires "autocommit" mode.
            msg = (
                f'isolation_level must be None, got: {isolation_level!r}\n'
                '\n'
                'For explicit transaction handling, the connection must '
                'be operating in "autocommit" mode. Turn on autocommit '
                'mode by setting "con.isolation_level = None".'
            )
            raise sqlite3.OperationalError(msg)
        self.name = next(_SAVEPOINT_NAME_GENERATOR)  # Unique per instance.
        self.cursor = cursor

    def __enter__(self):
        self.cursor.execute(f'SAVEPOINT {self.name}')

    def __exit__(self, exc_type, exc_val, exc_tb):
        statement = 'RELEASE' if exc_type is not None else 'RELEASE'
        if exc_type is None:
            self.cursor.execute(f'RELEASE {self.name}')
        else:
            self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
    """Yield a cursor that runs in an isolated transaction.

    If the context manager exits without errors, the transaction is
    committed. If an exception is raised, all changes are rolled-back.
    """
    # Only close connections we open ourselves; a caller-supplied
    # connection remains the caller's responsibility.
    owns_connection = not isinstance(path_or_connection, sqlite3.Connection)
    if owns_connection:
        connection = connect(path_or_connection, mode=mode)
    else:
        connection = path_or_connection

    cursor = connection.cursor()
    try:
        with savepoint(cursor):
            yield cursor
    finally:
        cursor.close()
        if owns_connection:
            connection.close()
|
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
| 361 | 383 |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
    """Return a SQL statement for creating a temporary trigger. The
    trigger is used to validate the contents of TEXT_JSON type columns.

    The trigger will pass without error if the JSON is wellformed.

    Raises:
        ValueError: if *insert_or_update* is not 'INSERT' or 'UPDATE'.
    """
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    if SQLITE_JSON1_ENABLED:
        # JSON1 available: use the built-in json_valid() SQL function.
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND json_valid(NEW.{column}) = 0
    """.rstrip()
    else:
        # No JSON1: use the application-defined is_wellformed_json()
        # function registered in _add_functions_and_triggers().
        when_clause = f"""
        NEW.{column} IS NOT NULL
        AND is_wellformed_json(NEW.{column}) = 0
    """.rstrip()

    return f'''
    CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
    BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
    WHEN{when_clause}
    BEGIN
        SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
    END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
    """Return a sqlite3 connection to the Toron node file at *path*.

    If *path* does not exist (and *mode* allows creation), a new node
    file is created and initialized with the expected schema.

    Raises:
        ToronError: if the file cannot be opened, is not a Toron node,
            or uses an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'

    # Check existence *before* connecting--connecting in 'rwc' mode
    # creates the file as a side effect.
    is_new_file = not os.path.exists(path)
    try:
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,
            uri=True,
        )
        if is_new_file:
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg) from err  # Chain the cause for debugging.

    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as err:
        # Raises OperationalError when *path* is a database with an unknown
        # schema and DatabaseError when *path* is a file but not a database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}') from err

    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()

    if schema_version != 1:  # When schema version is unsupported.
        con.close()  # Don't leak the connection on the failure path.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)

    return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
If the block exists without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = lambda: None # Don't close already-existing cursor.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
|
transaction: a context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without errors, the
transaction is committed. If an exception is raised, all changes are
rolled back.
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
"""Returns a sqlite3 connection to a Toron node file."""
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
try:
get_connection = lambda: sqlite3.connect(
database=uri_path,
detect_types=sqlite3.PARSE_DECLTYPES,
isolation_level=None,
uri=True,
)
if os.path.exists(path):
con = get_connection()
else:
con = get_connection()
con.executescript(_schema_script) # Create database schema.
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
# Raises OperationalError when *path* is a database with an unknown
# schema and DatabaseError when *path* is a file but not a database.
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
schema_version, *_ = cur.fetchone() or (None,)
cur.close()
if schema_version != 1: # When schema version is unsupported.
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
If the block exists without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
# MASKED: transaction function (lines 464-484)
|
@contextmanager
def transaction(path_or_connection, mode=None):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = lambda: None # Don't close already-existing cursor.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
| 464 | 484 |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
# necessary to test for their presence rathern than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
    """Return a SQL statement for creating a temporary trigger. The
    trigger is used to validate the contents of TEXT_JSON type columns.
    The trigger will pass without error if the JSON is wellformed.

    :param insert_or_update: trigger event, 'INSERT' or 'UPDATE'
        (case-insensitive); any other value raises ValueError.
    :param table: name of the table to attach the trigger to.
    :param column: TEXT_JSON column whose values are validated.
    :returns: a CREATE TEMPORARY TRIGGER statement (str).
    """
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    # Prefer the native JSON1 json_valid() check when available; fall
    # back to the application-defined is_wellformed_json() function
    # (registered in _add_functions_and_triggers()) otherwise.
    if SQLITE_JSON1_ENABLED:
        when_clause = f"""
            NEW.{column} IS NOT NULL
            AND json_valid(NEW.{column}) = 0
        """.rstrip()
    else:
        when_clause = f"""
            NEW.{column} IS NOT NULL
            AND is_wellformed_json(NEW.{column}) = 0
        """.rstrip()

    # NULL is allowed; the trigger only aborts when a non-NULL value
    # fails the wellformed-JSON check.
    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
        WHEN{when_clause}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
        END;
    '''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
    """Return a SQL statement for creating a temporary trigger. The
    trigger is used to validate the contents of TEXT_ATTRIBUTES
    type columns.

    The trigger will pass without error if the JSON is a wellformed
    "object" containing "text" values.

    The trigger will raise an error if the value is:

      * not wellformed JSON
      * not an "object" type
      * an "object" type that contains one or more "integer", "real",
        "true", "false", "null", "object" or "array" types

    :param insert_or_update: trigger event, 'INSERT' or 'UPDATE'
        (case-insensitive); any other value raises ValueError.
    :param table: name of the table to attach the trigger to.
    :param column: TEXT_ATTRIBUTES column whose values are validated.
    :returns: a CREATE TEMPORARY TRIGGER statement (str).
    """
    if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
        msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
        raise ValueError(msg)

    if SQLITE_JSON1_ENABLED:
        # JSON1 available: check wellformedness, object type, and that
        # every top-level member value is of JSON type 'text'.
        when_clause = f"""
            NEW.{column} IS NOT NULL
            AND (json_valid(NEW.{column}) = 0
                 OR json_type(NEW.{column}) != 'object'
                 OR (SELECT COUNT(*)
                     FROM json_each(NEW.{column})
                     WHERE json_each.type != 'text') != 0)
        """.rstrip()
    else:
        # Fall back to the application-defined function registered in
        # _add_functions_and_triggers().
        when_clause = f"""
            NEW.{column} IS NOT NULL
            AND is_wellformed_attributes(NEW.{column}) = 0
        """.rstrip()

    return f'''
        CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
        BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
        WHEN{when_clause}
        BEGIN
            SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
        END;
    '''
def _add_functions_and_triggers(connection):
    """Create triggers and application-defined functions on *connection*.

    Note: This function must not be executed on an empty connection.
    The table schema must exist before triggers can be created.
    """
    if not SQLITE_JSON1_ENABLED:
        # Register Python fallbacks for the missing JSON1 checks.  Try
        # to mark them deterministic first; retry without the flag when
        # this Python/SQLite combination rejects the keyword (TypeError).
        try:
            connection.create_function(
                'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
            connection.create_function(
                'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
            connection.create_function(
                'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
        except TypeError:
            connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
            connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
            connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)

    # Validate TEXT_JSON and TEXT_USERPROPERTIES columns on write.
    connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
    connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
    connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
    connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))

    # Validate every TEXT_ATTRIBUTES (flat JSON object) column on write.
    jsonflatobj_columns = [
        ('edge', 'type_info'),
        ('quantity', 'attributes'),
        ('weight', 'type_info'),
    ]
    for table, column in jsonflatobj_columns:
        connection.execute(_make_trigger_for_attributes('INSERT', table, column))
        connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
    """Return a sqlite3 connection to the Toron node file at *path*.

    :param path: filesystem path of the node file; a new node is
        created when the file does not exist (subject to *mode*).
    :param mode: SQLite URI access mode ('ro', 'rw', or 'rwc').
    :returns: an open sqlite3.Connection in autocommit mode.
    :raises ToronError: when the file cannot be opened, is not a Toron
        node, or uses an unsupported schema version.
    """
    uri_path = _path_to_sqlite_uri(path)
    uri_path = f'{uri_path}?mode={mode}'

    con = None
    try:
        # Check existence *before* connecting--connecting can create
        # the file--to decide whether the schema must be initialized.
        is_new = not os.path.exists(path)
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,  # Autocommit mode, required by savepoint().
            uri=True,
        )
        if is_new:
            con.executescript(_schema_script)  # Create database schema.
    except sqlite3.OperationalError as err:
        if con is not None:
            con.close()  # Don't leak the handle when schema creation fails.
        msg = str(err).replace('database file', f'node file {path!r}')
        raise ToronError(msg) from err  # Chain the cause for debugging.

    try:
        _add_functions_and_triggers(con)
    except (sqlite3.OperationalError, sqlite3.DatabaseError) as err:
        # Raises OperationalError when *path* is a database with an unknown
        # schema and DatabaseError when *path* is a file but not a database.
        con.close()
        raise ToronError(f'Path is not a Toron node: {path!r}') from err

    cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
    schema_version, *_ = cur.fetchone() or (None,)
    cur.close()

    if schema_version != 1:  # When schema version is unsupported.
        con.close()  # Previously leaked the open connection here.
        msg = f'Unsupported Toron node format: schema version {schema_version!r}'
        raise ToronError(msg)

    return con
_SAVEPOINT_NAME_GENERATOR = map('svpnt{0}'.format, itertools.count())


class savepoint(object):
    """Wrap a block of statements inside a SQLite SAVEPOINT.

    Exiting the block normally releases the SAVEPOINT (committing its
    changes); exiting via an exception rolls the changes back:

        cur = con.cursor()
        with savepoint(cur):
            cur.execute(...)
    """
    def __init__(self, cursor):
        isolation_level = cursor.connection.isolation_level
        if isolation_level is not None:
            msg = (
                f'isolation_level must be None, got: {isolation_level!r}\n'
                '\n'
                'For explicit transaction handling, the connection must '
                'be operating in "autocommit" mode. Turn on autocommit '
                'mode by setting "con.isolation_level = None".'
            )
            raise sqlite3.OperationalError(msg)
        self.name = next(_SAVEPOINT_NAME_GENERATOR)
        self.cursor = cursor

    def __enter__(self):
        self.cursor.execute(f'SAVEPOINT {self.name}')

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release on success, roll back on error.
        action = 'RELEASE' if exc_type is None else 'ROLLBACK TO'
        self.cursor.execute(f'{action} {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
    """Yield a cursor running inside an isolated transaction.

    On a clean exit the transaction is committed; when an exception
    propagates out of the block, every change is rolled back.

    *path_or_connection* may be an existing sqlite3.Connection (left
    open afterwards) or a path opened via connect() (closed when the
    block finishes).
    """
    if isinstance(path_or_connection, sqlite3.Connection):
        connection = path_or_connection
        close_when_done = False  # Caller owns the connection.
    else:
        connection = connect(path_or_connection, mode=mode)
        close_when_done = True

    cursor = connection.cursor()
    try:
        with savepoint(cursor):
            yield cursor
    finally:
        cursor.close()
        if close_when_done:
            connection.close()
|
_find_playlist_info
|
Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id.
|
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
_player_re = re.compile(
r'ivysilani/embed/iFramePlayer[^"]+'
)
_hash_re = re.compile(
r'hash:"([0-9a-z]+)"'
)
_playlist_info_re = re.compile(
r'{"type":"([a-z]+)","id":"([0-9]+)"'
)
_playlist_url_schema = validate.Schema({
validate.optional("streamingProtocol"): validate.text,
"url": validate.any(
validate.url(),
"Error",
"error_region"
)
})
_playlist_schema = validate.Schema({
"playlist": [{
validate.optional("type"): validate.text,
"streamUrls": {
"main": validate.url(),
}
}]
})
def _get_streams(self):
self.session.http.headers.update({'User-Agent': useragents.IPAD})
self.session.http.verify = False
log.warning('SSL certificate verification is disabled.')
# fetch requested url and find playlist info
response = self.session.http.get(self.url)
info = self._find_playlist_info(response)
if not info:
# do next try with new API
def _fallback_api(*args, **kwargs):
self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
return self.api2._get_streams()
# playlist info not found, let's try to find player url
player_url = self._find_player_url(response)
if not player_url:
log.debug('Cannot find playlist info or player url, do next try with new API')
return _fallback_api(res=response)
# get player url and try to find playlist info in it
response = self.session.http.get(player_url)
info = self._find_playlist_info(response)
if not info:
log.debug('Cannot find playlist info in the player url, do next try with new API')
return _fallback_api()
log.trace('{0!r}'.format(info))
data = {
'playlist[0][type]': info['type'],
'playlist[0][id]': info['id'],
'requestUrl': '/ivysilani/embed/iFramePlayer.php',
'requestSource': 'iVysilani',
'type': 'html'
}
headers = {
'x-addr': '127.0.0.1',
}
# fetch playlist url
response = self.session.http.post(
self.ajax_url,
data=data,
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_url_schema)
log.trace('{0!r}'.format(json_data))
if json_data['url'] in ['Error', 'error_region']:
log.error('This stream is not available')
return
# fetch playlist
response = self.session.http.post(json_data['url'])
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['playlist'][0]['streamUrls']['main']
return HLSStream.parse_variant_playlist(self.session, playlist)
# MASKED: _find_playlist_info function (lines 118-132)
@classmethod
def _find_player_url(cls, response):
"""
Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str).
"""
url = ''
matches = cls._player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if 'hash' not in tmp_url:
# there's no hash in the URL, try to find it
matches = cls._hash_re.search(response.text)
if matches:
url = tmp_url + '&hash=' + matches.group(1)
else:
url = tmp_url
return 'http://ceskatelevize.cz/' + url
class CeskatelevizeAPI2:
_player_api = 'https://playlist.ceskatelevize.cz/'
_url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
_playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
_playlist_schema = validate.Schema({
"CODE": validate.contains("OK"),
"RESULT": {
"playlist": [{
"streamUrls": {
"main": validate.url(),
}
}]
}
})
_ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
_ctcomp_schema = validate.Schema(
validate.text,
validate.transform(_ctcomp_re.findall),
validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
)
_playlist_info_schema = validate.Schema({
"type": validate.text,
"id": validate.any(validate.text, int),
"key": validate.text,
"date": validate.text,
"requestSource": validate.text,
"drm": int,
validate.optional("canBePlay"): int,
validate.optional("assetId"): validate.text,
"quality": validate.text,
validate.optional("region"): int
})
def __init__(self, session, url, res=None):
self.session = session
self.url = url
self.response = res
def _get_streams(self):
if self.response is None:
infos = self.session.http.get(self.url, schema=self._ctcomp_schema)
else:
infos = self.session.http.json(self.response, schema=self._ctcomp_schema)
if not infos:
# playlist infos not found
raise PluginError('Cannot find playlist infos!')
vod_prio = len(infos) == 2
for info in infos:
try:
pl = info['ctcomp-data']['source']['playlist'][0]
except KeyError:
raise PluginError('Cannot find playlist info!')
pl = self._playlist_info_schema.validate(pl)
if vod_prio and pl['type'] != 'VOD':
continue
log.trace('{0!r}'.format(info))
if pl['type'] == 'LIVE':
data = {
"contentType": "live",
"items": [{
"id": pl["id"],
"assetId": pl["assetId"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"quality": pl["quality"],
}]
}
elif pl['type'] == 'VOD':
data = {
"contentType": "vod",
"items": [{
"id": pl["id"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"canBePlay": pl["canBePlay"],
"quality": pl["quality"],
"region": pl["region"]
}]
}
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
data = json.dumps(data)
response = self.session.http.post(
self._player_api,
data="data={}".format(quote(data)),
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']
yield from DASHStream.parse_manifest(self.session, playlist).items()
__plugin__ = Ceskatelevize
|
@classmethod
def _find_playlist_info(cls, response):
"""
Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id.
"""
values = {}
matches = cls._playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values
| 118 | 132 |
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
    """Streamlink plugin for Czech TV (Ceska televize).

    Tries the legacy iVysilani embedded-player API first and falls
    back to CeskatelevizeAPI2 when no playlist info can be found.
    """

    # Endpoint that exchanges playlist info (type/id) for a playlist URL.
    ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
    # Locates the embedded iFramePlayer URL fragment inside page HTML.
    _player_re = re.compile(
        r'ivysilani/embed/iFramePlayer[^"]+'
    )
    # Extracts the player "hash" token when it is missing from the URL.
    _hash_re = re.compile(
        r'hash:"([0-9a-z]+)"'
    )
    # Captures playlist type and numeric id from inline player JSON.
    _playlist_info_re = re.compile(
        r'{"type":"([a-z]+)","id":"([0-9]+)"'
    )
    # Response schema for ajax_url; "url" may be a real URL or one of
    # the literal error markers handled in _get_streams().
    _playlist_url_schema = validate.Schema({
        validate.optional("streamingProtocol"): validate.text,
        "url": validate.any(
            validate.url(),
            "Error",
            "error_region"
        )
    })
    # Response schema for the playlist document itself.
    _playlist_schema = validate.Schema({
        "playlist": [{
            validate.optional("type"): validate.text,
            "streamUrls": {
                "main": validate.url(),
            }
        }]
    })

    def _get_streams(self):
        """Return HLS streams, or delegate to CeskatelevizeAPI2."""
        self.session.http.headers.update({'User-Agent': useragents.IPAD})
        self.session.http.verify = False
        log.warning('SSL certificate verification is disabled.')
        # fetch requested url and find playlist info
        response = self.session.http.get(self.url)
        info = self._find_playlist_info(response)

        if not info:
            # do next try with new API
            def _fallback_api(*args, **kwargs):
                self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
                return self.api2._get_streams()

            # playlist info not found, let's try to find player url
            player_url = self._find_player_url(response)
            if not player_url:
                log.debug('Cannot find playlist info or player url, do next try with new API')
                return _fallback_api(res=response)

            # get player url and try to find playlist info in it
            response = self.session.http.get(player_url)
            info = self._find_playlist_info(response)
            if not info:
                log.debug('Cannot find playlist info in the player url, do next try with new API')
                return _fallback_api()

        log.trace('{0!r}'.format(info))

        data = {
            'playlist[0][type]': info['type'],
            'playlist[0][id]': info['id'],
            'requestUrl': '/ivysilani/embed/iFramePlayer.php',
            'requestSource': 'iVysilani',
            'type': 'html'
        }
        headers = {
            'x-addr': '127.0.0.1',
        }

        # fetch playlist url
        response = self.session.http.post(
            self.ajax_url,
            data=data,
            headers=headers
        )
        json_data = self.session.http.json(response, schema=self._playlist_url_schema)
        log.trace('{0!r}'.format(json_data))
        if json_data['url'] in ['Error', 'error_region']:
            log.error('This stream is not available')
            return

        # fetch playlist
        response = self.session.http.post(json_data['url'])
        json_data = self.session.http.json(response, schema=self._playlist_schema)
        log.trace('{0!r}'.format(json_data))
        playlist = json_data['playlist'][0]['streamUrls']['main']
        return HLSStream.parse_variant_playlist(self.session, playlist)

    @classmethod
    def _find_playlist_info(cls, response):
        """
        Finds playlist info (type, id) in HTTP response.

        :param response: Response object.
        :returns: Dictionary with type and id (empty when not found).
        """
        values = {}
        matches = cls._playlist_info_re.search(response.text)
        if matches:
            values['type'] = matches.group(1)
            values['id'] = matches.group(2)
        return values

    @classmethod
    def _find_player_url(cls, response):
        """
        Finds embedded player url in HTTP response.

        :param response: Response object.
        :returns: Player url (str).
        """
        # NOTE(review): when nothing matches, this still returns the
        # bare 'http://ceskatelevize.cz/' prefix (never falsy), so the
        # caller's `if not player_url` fallback cannot trigger directly;
        # the API2 fallback only happens after the extra page fetch
        # yields no playlist info. Consider returning '' when url is
        # empty -- confirm against callers first.
        url = ''
        matches = cls._player_re.search(response.text)
        if matches:
            # NOTE(review): replace('&', '&') is a no-op; this looks
            # like an HTML-unescaping artifact of an original
            # replace('&amp;', '&') -- verify against upstream.
            tmp_url = matches.group(0).replace('&', '&')
            if 'hash' not in tmp_url:
                # there's no hash in the URL, try to find it
                matches = cls._hash_re.search(response.text)
                if matches:
                    url = tmp_url + '&hash=' + matches.group(1)
            else:
                url = tmp_url

        return 'http://ceskatelevize.cz/' + url
class CeskatelevizeAPI2:
    """Newer playlist API used by ceskatelevize.cz pages that embed
    video metadata in data-ctcomp HTML attributes.
    """

    # Endpoint that exchanges a playlist request for stream URLs.
    _player_api = 'https://playlist.ceskatelevize.cz/'
    _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
    # Looser variant of the playlist-info pattern (tolerates whitespace
    # and alphanumeric ids).
    _playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
    _playlist_schema = validate.Schema({
        "CODE": validate.contains("OK"),
        "RESULT": {
            "playlist": [{
                "streamUrls": {
                    "main": validate.url(),
                }
            }]
        }
    })
    # Finds data-ctcomp video components; their payload is HTML-escaped JSON.
    _ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
    _ctcomp_schema = validate.Schema(
        validate.text,
        validate.transform(_ctcomp_re.findall),
        validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
    )
    # Shape of a single playlist entry taken from ctcomp-data.
    _playlist_info_schema = validate.Schema({
        "type": validate.text,
        "id": validate.any(validate.text, int),
        "key": validate.text,
        "date": validate.text,
        "requestSource": validate.text,
        "drm": int,
        validate.optional("canBePlay"): int,
        validate.optional("assetId"): validate.text,
        "quality": validate.text,
        validate.optional("region"): int
    })

    def __init__(self, session, url, res=None):
        # *res* lets the caller hand over an already-fetched response
        # so the page is not requested twice.
        self.session = session
        self.url = url
        self.response = res

    def _get_streams(self):
        """Yield DASH streams for each playable ctcomp video found."""
        if self.response is None:
            infos = self.session.http.get(self.url, schema=self._ctcomp_schema)
        else:
            infos = self.session.http.json(self.response, schema=self._ctcomp_schema)
        if not infos:
            # playlist infos not found
            raise PluginError('Cannot find playlist infos!')

        # When exactly two components are present, only the VOD entry
        # is kept (non-VOD entries are skipped below).
        vod_prio = len(infos) == 2
        for info in infos:
            try:
                pl = info['ctcomp-data']['source']['playlist'][0]
            except KeyError:
                raise PluginError('Cannot find playlist info!')

            pl = self._playlist_info_schema.validate(pl)
            if vod_prio and pl['type'] != 'VOD':
                continue

            log.trace('{0!r}'.format(info))
            if pl['type'] == 'LIVE':
                data = {
                    "contentType": "live",
                    "items": [{
                        "id": pl["id"],
                        "assetId": pl["assetId"],
                        "key": pl["key"],
                        "playerType": "dash",
                        "date": pl["date"],
                        "requestSource": pl["requestSource"],
                        "drm": pl["drm"],
                        "quality": pl["quality"],
                    }]
                }
            elif pl['type'] == 'VOD':
                data = {
                    "contentType": "vod",
                    "items": [{
                        "id": pl["id"],
                        "key": pl["key"],
                        "playerType": "dash",
                        "date": pl["date"],
                        "requestSource": pl["requestSource"],
                        "drm": pl["drm"],
                        "canBePlay": pl["canBePlay"],
                        "quality": pl["quality"],
                        "region": pl["region"]
                    }]
                }

            headers = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            }
            # The API expects the JSON payload form-encoded as data=<json>.
            data = json.dumps(data)
            response = self.session.http.post(
                self._player_api,
                data="data={}".format(quote(data)),
                headers=headers
            )
            json_data = self.session.http.json(response, schema=self._playlist_schema)
            log.trace('{0!r}'.format(json_data))
            playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']
            yield from DASHStream.parse_manifest(self.session, playlist).items()
__plugin__ = Ceskatelevize  # Module-level plugin export.
|
_find_player_url
|
Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str).
|
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
# Module-level logger shared by both API client classes in this plugin.
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
    """Resolve Czech TV (Ceska televize) streams via the legacy iVysilani
    AJAX playlist API, falling back to ``CeskatelevizeAPI2`` when the
    legacy playlist info or embedded player cannot be found in the page.
    """

    # Legacy AJAX endpoint that turns a (type, id) pair into a playlist URL.
    ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
    # Relative URL of the embedded iFrame player inside the page HTML.
    _player_re = re.compile(
        r'ivysilani/embed/iFramePlayer[^"]+'
    )
    # `hash` token used when the player URL itself does not carry one.
    _hash_re = re.compile(
        r'hash:"([0-9a-z]+)"'
    )
    # Inline JSON fragment holding the playlist type and numeric id.
    _playlist_info_re = re.compile(
        r'{"type":"([a-z]+)","id":"([0-9]+)"'
    )
    # The AJAX endpoint answers with either a playlist URL or one of two
    # literal error markers.
    _playlist_url_schema = validate.Schema({
        validate.optional("streamingProtocol"): validate.text,
        "url": validate.any(
            validate.url(),
            "Error",
            "error_region"
        )
    })
    # Shape of the playlist document fetched from the resolved URL.
    _playlist_schema = validate.Schema({
        "playlist": [{
            validate.optional("type"): validate.text,
            "streamUrls": {
                "main": validate.url(),
            }
        }]
    })

    def _get_streams(self):
        """Return HLS streams for the current URL, or delegate to the new API."""
        self.session.http.headers.update({'User-Agent': useragents.IPAD})
        # NOTE(review): TLS verification is deliberately disabled and loudly
        # logged below -- presumably to cope with broken certificates on some
        # CT endpoints; confirm before re-enabling.
        self.session.http.verify = False
        log.warning('SSL certificate verification is disabled.')
        # fetch requested url and find playlist info
        response = self.session.http.get(self.url)
        info = self._find_playlist_info(response)

        if not info:
            # do next try with new API
            def _fallback_api(*args, **kwargs):
                # Delegate stream resolution to the new playlist API client.
                self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
                return self.api2._get_streams()

            # playlist info not found, let's try to find player url
            player_url = self._find_player_url(response)
            if not player_url:
                log.debug('Cannot find playlist info or player url, do next try with new API')
                return _fallback_api(res=response)

            # get player url and try to find playlist info in it
            response = self.session.http.get(player_url)
            info = self._find_playlist_info(response)
            if not info:
                log.debug('Cannot find playlist info in the player url, do next try with new API')
                return _fallback_api()

        log.trace('{0!r}'.format(info))

        # Form payload understood by the legacy get-client-playlist endpoint.
        data = {
            'playlist[0][type]': info['type'],
            'playlist[0][id]': info['id'],
            'requestUrl': '/ivysilani/embed/iFramePlayer.php',
            'requestSource': 'iVysilani',
            'type': 'html'
        }
        headers = {
            # NOTE(review): the endpoint appears to require this header;
            # confirm whether any value is accepted.
            'x-addr': '127.0.0.1',
        }

        # fetch playlist url
        response = self.session.http.post(
            self.ajax_url,
            data=data,
            headers=headers
        )
        json_data = self.session.http.json(response, schema=self._playlist_url_schema)
        log.trace('{0!r}'.format(json_data))
        if json_data['url'] in ['Error', 'error_region']:
            log.error('This stream is not available')
            return

        # fetch playlist
        response = self.session.http.post(json_data['url'])
        json_data = self.session.http.json(response, schema=self._playlist_schema)
        log.trace('{0!r}'.format(json_data))
        playlist = json_data['playlist'][0]['streamUrls']['main']
        return HLSStream.parse_variant_playlist(self.session, playlist)

    @classmethod
    def _find_playlist_info(cls, response):
        """
        Finds playlist info (type, id) in HTTP response.
        :param response: Response object.
        :returns: Dictionary with type and id; empty dict when not found.
        """
        values = {}
        matches = cls._playlist_info_re.search(response.text)
        if matches:
            values['type'] = matches.group(1)
            values['id'] = matches.group(2)
        return values

    # MASKED: _find_player_url function (lines 134-154)
class CeskatelevizeAPI2:
    """Client for the newer https://playlist.ceskatelevize.cz/ JSON API."""

    _player_api = 'https://playlist.ceskatelevize.cz/'
    _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
    # Inline JSON fragment holding playlist type and id (new page layout).
    _playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
    # Shape of the playlist API response; "CODE" must contain "OK".
    _playlist_schema = validate.Schema({
        "CODE": validate.contains("OK"),
        "RESULT": {
            "playlist": [{
                "streamUrls": {
                    "main": validate.url(),
                }
            }]
        }
    })
    # Video components embedded in the page as HTML-escaped JSON attributes.
    _ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
    _ctcomp_schema = validate.Schema(
        validate.text,
        validate.transform(_ctcomp_re.findall),
        validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
    )
    # Per-item playlist metadata extracted from a ctcomp component.
    _playlist_info_schema = validate.Schema({
        "type": validate.text,
        "id": validate.any(validate.text, int),
        "key": validate.text,
        "date": validate.text,
        "requestSource": validate.text,
        "drm": int,
        validate.optional("canBePlay"): int,
        validate.optional("assetId"): validate.text,
        "quality": validate.text,
        validate.optional("region"): int
    })

    def __init__(self, session, url, res=None):
        # `res` lets the legacy plugin hand over an already-fetched response
        # so the page is not downloaded twice.
        self.session = session
        self.url = url
        self.response = res

    def _get_streams(self):
        """Yield DASH streams for every playable component on the page."""
        if self.response is None:
            infos = self.session.http.get(self.url, schema=self._ctcomp_schema)
        else:
            infos = self.session.http.json(self.response, schema=self._ctcomp_schema)
        if not infos:
            # playlist infos not found
            raise PluginError('Cannot find playlist infos!')

        # When exactly two components are present, prefer the VOD one.
        vod_prio = len(infos) == 2
        for info in infos:
            try:
                pl = info['ctcomp-data']['source']['playlist'][0]
            except KeyError:
                raise PluginError('Cannot find playlist info!')

            pl = self._playlist_info_schema.validate(pl)
            if vod_prio and pl['type'] != 'VOD':
                continue

            log.trace('{0!r}'.format(info))
            if pl['type'] == 'LIVE':
                data = {
                    "contentType": "live",
                    "items": [{
                        "id": pl["id"],
                        "assetId": pl["assetId"],
                        "key": pl["key"],
                        "playerType": "dash",
                        "date": pl["date"],
                        "requestSource": pl["requestSource"],
                        "drm": pl["drm"],
                        "quality": pl["quality"],
                    }]
                }
            elif pl['type'] == 'VOD':
                data = {
                    "contentType": "vod",
                    "items": [{
                        "id": pl["id"],
                        "key": pl["key"],
                        "playerType": "dash",
                        "date": pl["date"],
                        "requestSource": pl["requestSource"],
                        "drm": pl["drm"],
                        "canBePlay": pl["canBePlay"],
                        "quality": pl["quality"],
                        "region": pl["region"]
                    }]
                }
            # NOTE(review): if pl['type'] is neither LIVE nor VOD, `data` is
            # unbound on the first iteration (NameError) or stale afterwards.

            headers = {
                "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
            }
            # The API expects the JSON payload URL-encoded in a `data=` form field.
            data = json.dumps(data)
            response = self.session.http.post(
                self._player_api,
                data="data={}".format(quote(data)),
                headers=headers
            )
            json_data = self.session.http.json(response, schema=self._playlist_schema)
            log.trace('{0!r}'.format(json_data))
            playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']
            yield from DASHStream.parse_manifest(self.session, playlist).items()
# Streamlink plugin entry point: the loader discovers the plugin class
# through the conventional `__plugin__` module attribute.
__plugin__ = Ceskatelevize
|
@classmethod
def _find_player_url(cls, response):
    """Find the embedded iFrame player URL in an HTTP response.

    :param response: Response object (must expose ``.text``).
    :returns: Absolute player URL (str), or ``''`` when no player URL or
              no usable ``hash`` token could be found.
    """
    url = ''
    matches = cls._player_re.search(response.text)
    if matches:
        # Undo HTML attribute escaping of query-string separators.
        tmp_url = matches.group(0).replace('&amp;', '&')
        if 'hash' not in tmp_url:
            # there's no hash in the URL, try to find it elsewhere in the page
            matches = cls._hash_re.search(response.text)
            if matches:
                url = tmp_url + '&hash=' + matches.group(1)
        else:
            url = tmp_url
    if not url:
        # Bug fix: previously the bare domain prefix was returned here, which
        # is always truthy, so callers checking `if not player_url` could
        # never fall back to the new API. Return '' to signal "not found".
        return ''
    return 'http://ceskatelevize.cz/' + url
| 134 | 154 |
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
# Module-level logger shared by both API client classes in this plugin.
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
    """Resolve Czech TV (Ceska televize) streams via the legacy iVysilani
    AJAX playlist API, falling back to ``CeskatelevizeAPI2`` when the
    legacy playlist info or embedded player cannot be found in the page.
    """

    # Legacy AJAX endpoint that turns a (type, id) pair into a playlist URL.
    ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
    # Relative URL of the embedded iFrame player inside the page HTML.
    _player_re = re.compile(
        r'ivysilani/embed/iFramePlayer[^"]+'
    )
    # `hash` token used when the player URL itself does not carry one.
    _hash_re = re.compile(
        r'hash:"([0-9a-z]+)"'
    )
    # Inline JSON fragment holding the playlist type and numeric id.
    _playlist_info_re = re.compile(
        r'{"type":"([a-z]+)","id":"([0-9]+)"'
    )
    # The AJAX endpoint answers with either a playlist URL or one of two
    # literal error markers.
    _playlist_url_schema = validate.Schema({
        validate.optional("streamingProtocol"): validate.text,
        "url": validate.any(
            validate.url(),
            "Error",
            "error_region"
        )
    })
    # Shape of the playlist document fetched from the resolved URL.
    _playlist_schema = validate.Schema({
        "playlist": [{
            validate.optional("type"): validate.text,
            "streamUrls": {
                "main": validate.url(),
            }
        }]
    })

    def _get_streams(self):
        """Return HLS streams for the current URL, or delegate to the new API."""
        self.session.http.headers.update({'User-Agent': useragents.IPAD})
        # NOTE(review): TLS verification is deliberately disabled and loudly
        # logged below -- confirm before re-enabling.
        self.session.http.verify = False
        log.warning('SSL certificate verification is disabled.')
        # fetch requested url and find playlist info
        response = self.session.http.get(self.url)
        info = self._find_playlist_info(response)

        if not info:
            # do next try with new API
            def _fallback_api(*args, **kwargs):
                # Delegate stream resolution to the new playlist API client.
                self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
                return self.api2._get_streams()

            # playlist info not found, let's try to find player url
            player_url = self._find_player_url(response)
            if not player_url:
                log.debug('Cannot find playlist info or player url, do next try with new API')
                return _fallback_api(res=response)

            # get player url and try to find playlist info in it
            response = self.session.http.get(player_url)
            info = self._find_playlist_info(response)
            if not info:
                log.debug('Cannot find playlist info in the player url, do next try with new API')
                return _fallback_api()

        log.trace('{0!r}'.format(info))

        # Form payload understood by the legacy get-client-playlist endpoint.
        data = {
            'playlist[0][type]': info['type'],
            'playlist[0][id]': info['id'],
            'requestUrl': '/ivysilani/embed/iFramePlayer.php',
            'requestSource': 'iVysilani',
            'type': 'html'
        }
        headers = {
            'x-addr': '127.0.0.1',
        }

        # fetch playlist url
        response = self.session.http.post(
            self.ajax_url,
            data=data,
            headers=headers
        )
        json_data = self.session.http.json(response, schema=self._playlist_url_schema)
        log.trace('{0!r}'.format(json_data))
        if json_data['url'] in ['Error', 'error_region']:
            log.error('This stream is not available')
            return

        # fetch playlist
        response = self.session.http.post(json_data['url'])
        json_data = self.session.http.json(response, schema=self._playlist_schema)
        log.trace('{0!r}'.format(json_data))
        playlist = json_data['playlist'][0]['streamUrls']['main']
        return HLSStream.parse_variant_playlist(self.session, playlist)

    @classmethod
    def _find_playlist_info(cls, response):
        """
        Finds playlist info (type, id) in HTTP response.
        :param response: Response object.
        :returns: Dictionary with type and id; empty dict when not found.
        """
        values = {}
        matches = cls._playlist_info_re.search(response.text)
        if matches:
            values['type'] = matches.group(1)
            values['id'] = matches.group(2)
        return values

    @classmethod
    def _find_player_url(cls, response):
        """
        Finds embedded player url in HTTP response.
        :param response: Response object.
        :returns: Player url (str); '' when no player or hash was found.
        """
        url = ''
        matches = cls._player_re.search(response.text)
        if matches:
            # Undo HTML attribute escaping of query-string separators.
            tmp_url = matches.group(0).replace('&amp;', '&')
            if 'hash' not in tmp_url:
                # there's no hash in the URL, try to find it elsewhere
                matches = cls._hash_re.search(response.text)
                if matches:
                    url = tmp_url + '&hash=' + matches.group(1)
            else:
                url = tmp_url
        if not url:
            # Bug fix: previously the bare domain prefix was returned here,
            # which is always truthy, so _get_streams' `if not player_url`
            # fallback to the new API could never trigger.
            return ''
        return 'http://ceskatelevize.cz/' + url
class CeskatelevizeAPI2:
    """Client for the newer https://playlist.ceskatelevize.cz/ JSON API."""

    _player_api = 'https://playlist.ceskatelevize.cz/'
    _url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
    # Inline JSON fragment holding playlist type and id (new page layout).
    _playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
    # Shape of the playlist API response; "CODE" must contain "OK".
    _playlist_schema = validate.Schema({
        "CODE": validate.contains("OK"),
        "RESULT": {
            "playlist": [{
                "streamUrls": {
                    "main": validate.url(),
                }
            }]
        }
    })
    # Video components embedded in the page as HTML-escaped JSON attributes.
    _ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
    _ctcomp_schema = validate.Schema(
        validate.text,
        validate.transform(_ctcomp_re.findall),
        validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
    )
    # Per-item playlist metadata extracted from a ctcomp component.
    _playlist_info_schema = validate.Schema({
        "type": validate.text,
        "id": validate.any(validate.text, int),
        "key": validate.text,
        "date": validate.text,
        "requestSource": validate.text,
        "drm": int,
        validate.optional("canBePlay"): int,
        validate.optional("assetId"): validate.text,
        "quality": validate.text,
        validate.optional("region"): int
    })

    def __init__(self, session, url, res=None):
        # `res` lets the caller hand over an already-fetched response so the
        # page is not downloaded a second time.
        self.session = session
        self.url = url
        self.response = res

    def _get_streams(self):
        """Yield DASH streams for every playable component on the page."""
        # Parse the video components either from the response we were handed
        # or by fetching the page ourselves.
        if self.response is None:
            entries = self.session.http.get(self.url, schema=self._ctcomp_schema)
        else:
            entries = self.session.http.json(self.response, schema=self._ctcomp_schema)
        if not entries:
            raise PluginError('Cannot find playlist infos!')

        # When exactly two components are present, only the VOD one is used.
        prefer_vod = len(entries) == 2
        for entry in entries:
            try:
                item = entry['ctcomp-data']['source']['playlist'][0]
            except KeyError:
                raise PluginError('Cannot find playlist info!')

            item = self._playlist_info_schema.validate(item)
            if prefer_vod and item['type'] != 'VOD':
                continue

            log.trace('{0!r}'.format(entry))
            if item['type'] == 'LIVE':
                payload = {
                    "contentType": "live",
                    "items": [{
                        "id": item["id"],
                        "assetId": item["assetId"],
                        "key": item["key"],
                        "playerType": "dash",
                        "date": item["date"],
                        "requestSource": item["requestSource"],
                        "drm": item["drm"],
                        "quality": item["quality"],
                    }]
                }
            elif item['type'] == 'VOD':
                payload = {
                    "contentType": "vod",
                    "items": [{
                        "id": item["id"],
                        "key": item["key"],
                        "playerType": "dash",
                        "date": item["date"],
                        "requestSource": item["requestSource"],
                        "drm": item["drm"],
                        "canBePlay": item["canBePlay"],
                        "quality": item["quality"],
                        "region": item["region"]
                    }]
                }

            # The API expects the JSON payload URL-encoded in a `data=` form field.
            res = self.session.http.post(
                self._player_api,
                data="data={}".format(quote(json.dumps(payload))),
                headers={
                    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
                }
            )
            parsed = self.session.http.json(res, schema=self._playlist_schema)
            log.trace('{0!r}'.format(parsed))
            stream_url = parsed['RESULT']['playlist'][0]['streamUrls']['main']
            yield from DASHStream.parse_manifest(self.session, stream_url).items()
# Streamlink plugin entry point: the loader discovers the plugin class
# through the conventional `__plugin__` module attribute.
__plugin__ = Ceskatelevize
|
imdecode
|
Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image file into an NDArray.

    This delegates to MXNet's built-in OpenCV binding (not the ``cv2``
    Python package); MXNet must have been built with ``USE_OPENCV=1``.

    Parameters
    ----------
    filename : str
        Path of the image file to load.
    flag : {0, 1}, default 1
        1 for three-channel color output, 0 for grayscale.
    to_rgb : bool, default True
        True for RGB channel order (MXNet default),
        False for BGR channel order (OpenCV default).
    out : NDArray, optional
        Pre-allocated output buffer; ``None`` allocates a new one.

    Returns
    -------
    NDArray
        The decoded image.

    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>

    >>> mx.img.imread("flower.jpg", flag=0)
    <NDArray 224x224x1 @cpu(0)>

    >>> mx.img.imread("flower.jpg", to_rgb=0)
    <NDArray 224x224x3 @cpu(0)>
    """
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an in-memory image buffer into an NDArray.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted
        output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    if not isinstance(buf, nd.NDArray):
        # Wrap raw bytes in a uint8 NDArray so the C++ OpenCV decoder can
        # consume them; NDArray inputs are passed through unchanged.
        buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
    """Clamp a crop size so it fits inside the source image.

    Whenever one dimension of ``size`` exceeds the corresponding dimension
    of ``src_size``, the crop is shrunk proportionally so the aspect ratio
    of the crop is preserved.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The clamped crop size in (width, height) format.

    Example
    --------
    >>> scale_down((640, 480), (720, 120))
    (640, 106)
    """
    crop_w, crop_h = size
    src_w, src_h = src_size
    # Fit the source height first, then the source width; each step rescales
    # the other dimension to keep the crop's aspect ratio.
    if src_h < crop_h:
        crop_w, crop_h = float(crop_w * src_h) / crop_h, src_h
    if src_w < crop_w:
        crop_w, crop_h = src_w, float(crop_h * src_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize the shorter edge of `src` to `size`, preserving aspect ratio.

    Note: `resize_short` uses OpenCV (not the cv2 Python library), so
    MXNet must have been built with OpenCV support.

    Parameters
    ----------
    src : NDArray
        The original image, HWC layout.
    size : int
        Target length of the shorter edge; the longer edge is scaled
        accordingly.
    interp : int, optional, default=2
        Interpolation method mentioned above (see `_get_interp_method`
        for the possible values).

    Returns
    -------
    NDArray
        An `NDArray` containing the resized image.

    Example
    -------
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    h, w, _ = src.shape
    if w < h:
        # width is the shorter edge
        new_w, new_h = size, size * h // w
    else:
        new_w, new_h = size * w // h, size
    sizes = (h, w, new_h, new_w)
    return imresize(src, new_w, new_h, interp=_get_interp_method(interp, sizes))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop `src` at a fixed location and optionally resize the result.

    Parameters
    ----------
    src : NDArray
        Input image, HWC layout.
    x0 : int
        Left boundary of the cropping area.
    y0 : int
        Top boundary of the cropping area.
    w : int
        Width of the cropping area.
    h : int
        Height of the cropping area.
    size : tuple of (w, h), optional
        When given and different from (w, h), resize the crop to this size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped (and possibly resized) image.
    """
    cropped = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    if size is None or (w, h) == size:
        return cropped
    return imresize(cropped, *size, interp=_get_interp_method(interp, (h, w, size[1], size[0])))
def random_crop(src, size, interp=2):
    """Crop `src` at a random position with the given `size` (width, height).

    When `size` is larger than the image, the crop is first clamped by
    `scale_down` and the result upsampled back to `size`.

    Parameters
    ----------
    src : NDArray
        Source image.
    size : tuple of (int, int)
        Crop size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        (x, y, width, height): top-left position of the crop in the source
        image and the dimensions of the cropped area.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)

    left = random.randint(0, w - crop_w)
    top = random.randint(0, h - crop_h)

    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop `src` to `size`, trimming equally on all four sides.

    Upsamples when `src` is smaller than `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): position and dimensions of the crop in the
        source image.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)

    # Split the margin evenly; any odd pixel goes to the right/bottom side.
    left = (w - crop_w) // 2
    top = (h - crop_h) // 2

    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize `src` in place with `mean` and `std`.

    Parameters
    ----------
    src : NDArray
        Input image; modified in place.
    mean : NDArray or None
        RGB mean to be subtracted; skipped when None.
    std : NDArray or None
        RGB standard deviation to be divided by; skipped when None.

    Returns
    -------
    NDArray
        The normalized image (same object as `src`).
    """
    if mean is not None:
        src -= mean
    if std is None:
        return src
    src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop `src` with randomized area and aspect ratio.

    Parameters
    ----------
    src : NDArray
        Input image.
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum and maximum area to be maintained after cropping.
        If float, minimum area; the maximum is set to 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        (x, y, width, height): top-left position of the crop in the source
        image and the dimensions of the cropped area.
    """
    h, w, _ = src.shape
    src_area = h * w
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."

    if isinstance(area, numeric_types):
        area = (area, 1.0)

    # Try up to ten random (area, aspect) proposals before giving up.
    for _ in range(10):
        crop_area = random.uniform(area[0], area[1]) * src_area
        aspect = random.uniform(*ratio)
        crop_w = int(round(np.sqrt(crop_area * aspect)))
        crop_h = int(round(np.sqrt(crop_area / aspect)))
        # Swap with 50% probability to cover aspect ratios below the range too.
        if random.random() < 0.5:
            crop_h, crop_w = crop_w, crop_h
        if crop_w > w or crop_h > h:
            continue
        left = random.randint(0, w - crop_w)
        top = random.randint(0, h - crop_h)
        cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
        return cropped, (left, top, crop_w, crop_h)

    # No admissible random crop found -- fall back to a center crop.
    return center_crop(src, size, interp)
class Augmenter(object):
    """Base class for image augmenters.

    Stores the constructor keyword arguments in a JSON-serializable form
    so the augmenter configuration can be round-tripped via `dumps`.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Convert NDArray/ndarray values to plain lists so json.dumps works.
        for name, value in kwargs.items():
            if isinstance(value, nd.NDArray):
                value = value.asnumpy()
            if isinstance(value, np.ndarray):
                value = value.tolist()
            self._kwargs[name] = value

    def dumps(self):
        """Serialize the Augmenter to a JSON string.

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body; subclasses must override."""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied in fixed, sequential order.

    Parameters
    ----------
    ts : list of augmenters
        Augmenters applied one after another, first to last.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Serialize as [name, [child dumps]] to avoid a duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Run every child augmenter over `src` in order."""
        for step in self.ts:
            src = step(src)
        return src
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge of an image to `size`.

    Parameters
    ----------
    size : int
        Target length of the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` so its shorter edge equals `self.size`."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` to exactly `self.size`."""
        h, w = src.shape[0], src.shape[1]
        sizes = (h, w, self.size[1], self.size[0])
        return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))
class RandomCropAug(Augmenter):
    """Augmenter that takes a randomly positioned crop.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return a randomly positioned crop of `src` (crop rect discarded)."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops with random resizing and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum and maximum area to be maintained after cropping.
        If float, minimum area; the maximum is set to 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            area = kwargs.pop('min_area')
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."
        self.size = size
        self.area = area
        self.ratio = ratio
        self.interp = interp

    def __call__(self, src):
        """Return a random-sized crop of `src` (crop rect discarded)."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return the center crop of `src` (crop rect discarded)."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply child augmenters in a freshly shuffled order on every call.

    Note: `self.ts` itself is shuffled in place on each call.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order.
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Serialize as [name, [child dumps]] to avoid a duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Shuffle the child list, then apply every augmenter to `src`."""
        random.shuffle(self.ts)
        for aug in self.ts:
            src = aug(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1].
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale `src` in place by a random factor in [1-b, 1+b]."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1].
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # Rec. 601 luma weights used to derive a gray level per pixel.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` (in place) toward its mean gray level."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        luma = src * self.coef
        luma = (3.0 * (1.0 - scale) / luma.size) * nd.sum(luma)
        src *= scale
        src += luma
        return src
class SaturationJitterAug(Augmenter):
    """Augmenter that randomly jitters image saturation.
    Parameters
    ----------
    saturation : float
        Maximum saturation jitter ratio, in [0, 1].
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # ITU-R BT.601 luma weights for the per-pixel grayscale component.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])
    def __call__(self, src):
        """Blend `src` in place between itself and its per-pixel luma."""
        alpha = 1.0 + random.uniform(-self.saturation, self.saturation)
        luma = nd.sum(src * self.coef, axis=2, keepdims=True)
        luma *= (1.0 - alpha)
        src *= alpha
        src += luma
        return src
class HueJitterAug(Augmenter):
    """Augmenter that randomly rotates image hue.
    Parameters
    ----------
    hue : float
        Maximum hue jitter ratio, in [0, 1].
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ transform and its inverse.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])
    def __call__(self, src):
        """Rotate hue via an approximate linear transformation.
        See https://beesbuzz.biz/code/hsv_color_transforms.php for the
        derivation of this approximation.
        """
        theta = random.uniform(-self.hue, self.hue) * np.pi
        vsu = np.cos(theta)
        vsw = np.sin(theta)
        # Rotation about the Y (luma) axis in YIQ space.
        bt = np.array([[1.0, 0.0, 0.0],
                       [0.0, vsu, -vsw],
                       [0.0, vsw, vsu]])
        t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
        return nd.dot(src, nd.array(t))
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.
    Parameters
    ----------
    brightness : float
        Maximum brightness jitter ratio, in [0, 1]; 0 disables it.
    contrast : float
        Maximum contrast jitter ratio, in [0, 1]; 0 disables it.
    saturation : float
        Maximum saturation jitter ratio, in [0, 1]; 0 disables it.
    """
    def __init__(self, brightness, contrast, saturation):
        augs = []
        for ratio, aug_cls in ((brightness, BrightnessJitterAug),
                               (contrast, ContrastJitterAug),
                               (saturation, SaturationJitterAug)):
            if ratio > 0:
                augs.append(aug_cls(ratio))
        super(ColorJitterAug, self).__init__(augs)
class LightingAug(Augmenter):
    """Augmenter that adds PCA-based (lighting) noise to an image.
    Parameters
    ----------
    alphastd : float
        Standard deviation of the per-channel noise coefficients.
    eigval : 3x1 np.array
        Eigenvalues of the channel covariance.
    eigvec : 3x3 np.array
        Eigenvectors of the channel covariance.
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec
    def __call__(self, src):
        """Add a random per-channel offset along the PCA directions, in place."""
        noise = np.random.normal(0, self.alphastd, size=(3,))
        rgb_shift = np.dot(self.eigvec * noise, self.eigval)
        src += nd.array(rgb_shift)
        return src
class ColorNormalizeAug(Augmenter):
    """Augmenter that normalizes an image with a mean and standard deviation.
    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted; `None` skips the subtraction.
    std : NDArray
        RGB standard deviation to be divided; `None` skips the division.
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Coerce list/ndarray inputs to NDArray once; keep None / NDArray as-is.
        if mean is None or isinstance(mean, nd.NDArray):
            self.mean = mean
        else:
            self.mean = nd.array(mean)
        if std is None or isinstance(std, nd.NDArray):
            self.std = std
        else:
            self.std = nd.array(std)
    def __call__(self, src):
        """Delegate to color_normalize (mutates `src` in place)."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Augmenter that converts an image to grayscale with probability `p`.
    Parameters
    ----------
    p : float
        Probability of the grayscale conversion.
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Replicates the luma value into all three channels.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])
    def __call__(self, src):
        """With probability `p`, project `src` onto grayscale; else pass through."""
        if random.random() >= self.p:
            return src
        return nd.dot(src, self.mat)
class HorizontalFlipAug(Augmenter):
    """Augmenter that horizontally flips an image with probability `p`.
    Parameters
    ----------
    p : float
        Probability of the flip.
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p
    def __call__(self, src):
        """Flip along the width axis with probability `p`; else pass through."""
        if random.random() < self.p:
            return nd.flip(src, axis=1)
        return src
class CastAug(Augmenter):
    """Augmenter that casts the image to a given dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ
    def __call__(self, src):
        """Return `src` cast to the configured dtype."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Creates an augmenter list.
    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]
    std : np.ndarray or None
        Standard deviations for [r, g, b]
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations
        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method metioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).
    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    auglist = []
    if resize > 0:
        auglist.append(ResizeAug(resize, inter_method))
    # data_shape is (C, H, W); crop helpers expect (width, height).
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        # Random sized crop only makes sense when random cropping is enabled.
        assert rand_crop
        auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        auglist.append(RandomCropAug(crop_size, inter_method))
    else:
        auglist.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        auglist.append(HorizontalFlipAug(0.5))
    # Cast to float32 before any arithmetic (jitter/normalization) is applied.
    auglist.append(CastAug())
    if brightness or contrast or saturation:
        auglist.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        auglist.append(HueJitterAug(hue))
    if pca_noise > 0:
        # Fixed RGB eigenvalues/eigenvectors for PCA lighting noise —
        # presumably the standard ImageNet statistics; TODO confirm source.
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        auglist.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        auglist.append(RandomGrayAug(rand_gray))
    # mean/std may be True (use built-in defaults), None (disabled),
    # or an explicit per-channel array.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        auglist.append(ColorNormalizeAug(mean, std))
    return auglist
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.
    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.
    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the begining
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """
    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # Exactly one image source must be usable: .rec file, .lst file, or in-memory list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        # --- Record source: indexed .rec (needed for shuffling/partitioning) or plain .rec ---
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        # --- Label source: parse a .lst file or an in-memory list into {key: (label, path)} ---
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                for line in iter(fin.readline, ''):
                    # Line format: index \t label(s)... \t relative_path
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                # Each item is [label(s)..., path]; labels may be a scalar, a list,
                # or spread over multiple leading entries.
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root
        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # self.seq is the ordered list of sample keys; None means sequential .rec reads.
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        # Keep only this worker's contiguous slice of the sequence.
        if num_parts > 1:
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache fields used only by the 'roll_over' last-batch strategy.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()
    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # For 'roll_over' with a pending partial batch, keep the read position so
        # the cached samples are completed on the next pass.
        if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
            if self._allow_read is False:
                self._allow_read = True
    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        # Drop any partial batch cached by 'roll_over'.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # End of epoch; rewind unless the remainder is being discarded.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    # Prefer the label from the .lst / imglist over the record header.
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential .rec read; None signals end of file.
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img
    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Undecodable/corrupt images are skipped, not fatal.
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # Returns how many slots were actually filled (may be < batch_size at epoch end).
        return i
    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad' (or completing a rolled-over batch): fill the remaining
                # slots by reading from the start of the data again.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)
    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')
    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')
    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            # self.cur was already advanced past the failing sample, hence the -1.
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img
    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img
    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data
    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # HWC (decoder layout) -> CHW (batch layout).
        return nd.transpose(datum, axes=(2, 0, 1))
|
def imdecode(buf, *args, **kwargs):
    """Decode raw image bytes into an NDArray.
    Note: `imdecode` uses OpenCV (not the CV2 Python library), so MXNet
    must have been built with USE_OPENCV=1.
    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data.
    flag : int, optional, default=1
        1 for three channel color output, 0 for grayscale.
    to_rgb : int, optional, default=1
        1 for RGB output (MXNet default), 0 for BGR (OpenCV default).
    out : NDArray, optional
        Output buffer; `None` allocates a new one.
    Returns
    -------
    NDArray
        The decoded image.
    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    # Wrap raw bytes into a uint8 NDArray before handing off to the C++ decoder.
    data = buf
    if not isinstance(data, nd.NDArray):
        data = nd.array(np.frombuffer(data, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(data, *args, **kwargs)
| 86 | 137 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read an image file and decode it into an NDArray.
    Note: `imread` uses OpenCV (not the CV2 Python library), so MXNet
    must have been built with USE_OPENCV=1.
    Parameters
    ----------
    filename : str
        Path of the image file to load.
    flag : {0, 1}, default 1
        1 for three channel color output, 0 for grayscale.
    to_rgb : bool, default True
        True for RGB output (MXNet default), False for BGR (OpenCV default).
    out : NDArray, optional
        Output buffer; `None` allocates a new one.
    Returns
    -------
    NDArray
        The decoded image.
    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    >>> mx.img.imread("flower.jpg", flag=0)
    <NDArray 224x224x1 @cpu(0)>
    >>> mx.img.imread("flower.jpg", to_rgb=0)
    <NDArray 224x224x3 @cpu(0)>
    """
    # Reading and decoding both happen inside the C++ backend.
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode raw image bytes into an NDArray.
    Note: `imdecode` uses OpenCV (not the CV2 Python library), so MXNet
    must have been built with USE_OPENCV=1.
    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data.
    flag : int, optional, default=1
        1 for three channel color output, 0 for grayscale.
    to_rgb : int, optional, default=1
        1 for RGB output (MXNet default), 0 for BGR (OpenCV default).
    out : NDArray, optional
        Output buffer; `None` allocates a new one.
    Returns
    -------
    NDArray
        The decoded image.
    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    # Wrap raw bytes into a uint8 NDArray before handing off to the C++ decoder.
    raw = buf
    if not isinstance(raw, nd.NDArray):
        raw = nd.array(np.frombuffer(raw, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(raw, *args, **kwargs)
def scale_down(src_size, size):
    """Shrink a requested crop size so that it fits inside the source image.
    The aspect ratio of the crop is preserved: whichever dimension exceeds
    the image is clamped, and the other dimension is scaled proportionally.
    Parameters
    ----------
    src_size : tuple of int
        Image size as (width, height).
    size : tuple of int
        Requested crop size as (width, height).
    Returns
    -------
    tuple of int
        The (width, height) of the largest proportional crop that fits.
    Example
    --------
    >>> scale_down((640, 480), (720, 120))
    (640, 106)
    """
    w, h = size
    src_w, src_h = src_size
    if h > src_h:
        w, h = float(w * src_h) / h, src_h
    if w > src_w:
        w, h = src_w, float(h * src_w) / w
    return int(w), int(h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize an image so that its shorter edge equals `size`.
    The longer edge is scaled proportionally to preserve the aspect ratio.
    Resizing is performed by OpenCV, so MXNet must have been built with
    USE_OPENCV=1.
    Parameters
    ----------
    src : NDArray
        The original image in HWC layout.
    size : int
        Target length of the shorter edge.
    interp : int, optional, default=2
        Interpolation method; see _get_interp_method for the code table
        (including the auto (9) and random (10) modes).
    Returns
    -------
    NDArray
        The resized image.
    Example
    -------
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    h, w, _ = src.shape
    if w < h:
        # Width is the shorter edge.
        new_w, new_h = size, size * h // w
    else:
        new_w, new_h = size * w // h, size
    return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop `src` at a fixed location and optionally resize the result.
    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    x0 : int
        Left boundary of the crop.
    y0 : int
        Top boundary of the crop.
    w : int
        Crop width.
    h : int
        Crop height.
    size : tuple of (w, h), optional
        If given and different from (w, h), resize the crop to this size.
    interp : int, optional, default=2
        Interpolation method; see resize_short for details.
    Returns
    -------
    NDArray
        The cropped (and possibly resized) image.
    """
    out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    if size is not None and (w, h) != size:
        out = imresize(out, *size, interp=_get_interp_method(interp, (h, w, size[1], size[0])))
    return out
def random_crop(src, size, interp=2):
    """Crop `src` at a random position to `size` (width, height).
    If the requested size exceeds the image, the crop is first shrunk to fit
    (preserving aspect ratio) and the result is then upsampled to `size`.
    Parameters
    ----------
    src : NDArray
        Source image in HWC layout.
    size : tuple of (int, int)
        Desired crop size as (width, height).
    interp : int, optional, default=2
        Interpolation method; see resize_short for details.
    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    Example
    -------
    >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    x0 = random.randint(0, w - crop_w)
    y0 = random.randint(0, h - crop_h)
    out = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return out, (x0, y0, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop the center of `src` to the given `size`, upsampling if needed.
    The crop is first shrunk to fit inside the image (preserving aspect
    ratio) and, when smaller than `size`, the result is resized back up.
    .. note:: This requires MXNet to be compiled with USE_OPENCV.
    Parameters
    ----------
    src : NDArray
        Source image in HWC layout.
    size : list or tuple of int
        Desired output size as (width, height).
    interp : int, optional, default=2
        Interpolation method; see resize_short for details.
    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    Example
    -------
    >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Integer division centers the crop (biased one pixel up/left for odd margins).
    x0 = int((w - crop_w) / 2)
    y0 = int((h - crop_h) / 2)
    out = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return out, (x0, y0, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize src with mean and std.
    Note: `src` is modified in place (via `-=` / `/=`) and also returned.
    Parameters
    ----------
    src : NDArray
        Input image
    mean : NDArray
        RGB mean to be subtracted; `None` skips the subtraction.
    std : NDArray
        RGB standard deviation to be divided; `None` skips the division.
    Returns
    -------
    NDArray
        An `NDArray` containing the normalized image.
    """
    if mean is not None:
        src -= mean
    if std is not None:
        src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Crop `src` with a random area fraction and aspect ratio, then resize to `size`.
    Up to 10 attempts are made to sample a crop that fits inside the image;
    if all fail, a plain center crop is returned instead.
    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    size : tuple of (int, int)
        Output size as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        Area fraction range to retain; a single float means (area, 1.0).
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method; see resize_short for details.
    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    """
    h, w, _ = src.shape
    src_area = h * w
    # Backward compatibility with the old `min_area` keyword.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    for _ in range(10):
        want_area = random.uniform(area[0], area[1]) * src_area
        aspect = random.uniform(*ratio)
        new_w = int(round(np.sqrt(want_area * aspect)))
        new_h = int(round(np.sqrt(want_area / aspect)))
        # Randomly swap width and height to also cover inverse aspect ratios.
        if random.random() < 0.5:
            new_h, new_w = new_w, new_h
        if new_w <= w and new_h <= h:
            x0 = random.randint(0, w - new_w)
            y0 = random.randint(0, h - new_h)
            out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
            return out, (x0, y0, new_w, new_h)
    # fall back to center_crop
    return center_crop(src, size, interp)
class Augmenter(object):
    """Image Augmenter base class.

    Keyword arguments passed by subclasses are recorded (with NDArray /
    numpy values converted to plain lists) so the augmenter can be
    serialized by ``dumps``.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        for key, val in self._kwargs.items():
            # Normalize array-like config values to JSON-friendly lists.
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[key] = val

    def dumps(self):
        """Saves the Augmenter to string

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied one after another.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [t.dumps() for t in self.ts]]

    def __call__(self, src):
        """Apply each augmenter in order and return the result."""
        for transform in self.ts:
            src = transform(src)
        return src
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge to `size`.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` so its shorter edge equals ``self.size``."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height)
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` to exactly ``self.size`` (width, height)."""
        old_h, old_w = src.shape[0], src.shape[1]
        # sizes is (old_h, old_w, new_h, new_w) for the auto interp modes.
        method = _get_interp_method(self.interp,
                                    (old_h, old_w, self.size[1], self.size[0]))
        return imresize(src, *self.size, interp=method)
class RandomCropAug(Augmenter):
    """Augmenter that takes a random crop of the given size.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return a random crop of `src` (the image only, not the rect)."""
        return random_crop(src, self.size, self.interp)[0]
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops randomly with area and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Honour the deprecated `min_area` keyword for backward compatibility.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            self.area = kwargs.pop('min_area')
        else:
            self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return the jittered random crop of `src` (the image only)."""
        return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return the center crop of `src` (the image only, not the rect)."""
        return center_crop(src, self.size, self.interp)[0]
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply all augmenters to `src` in a fresh random order.

        Fix: shuffle a copy of ``self.ts`` instead of the instance list.
        The original shuffled in place, permanently reordering the caller's
        list (and the order reported by ``dumps``) as a side effect of
        every call.
        """
        order = list(self.ts)
        random.shuffle(order)
        for t in order:
            src = t(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale `src` in place by a factor in [1-brightness, 1+brightness]."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # ITU-R BT.601 luma weights for RGB.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` (in place) with its mean luminance by a random factor."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        gray = src * self.coef
        gray = (3.0 * (1.0 - scale) / gray.size) * nd.sum(gray)
        src *= scale
        src += gray
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # ITU-R BT.601 luma weights for RGB.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` (in place) with its per-pixel luminance by a random factor."""
        scale = 1.0 + random.uniform(-self.saturation, self.saturation)
        gray = src * self.coef
        gray = nd.sum(gray, axis=2, keepdims=True)
        gray *= (1.0 - scale)
        src *= scale
        src += gray
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB <-> YIQ conversion matrices; a hue shift is a rotation in
        # the IQ chroma plane.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])

    def __call__(self, src):
        """Rotate the hue of `src` by a random angle.

        Uses the approximate linear transformation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        theta = random.uniform(-self.hue, self.hue)
        u = np.cos(theta * np.pi)
        w = np.sin(theta * np.pi)
        # Rotation about the Y (luma) axis in YIQ space.
        rot = np.array([[1.0, 0.0, 0.0],
                        [0.0, u, -w],
                        [0.0, w, u]])
        t = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
        return nd.dot(src, nd.array(t))
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, brightness, contrast, saturation):
        # Only create the jitters whose range is non-zero.
        augmenters = []
        if brightness > 0:
            augmenters.append(BrightnessJitterAug(brightness))
        if contrast > 0:
            augmenters.append(ContrastJitterAug(contrast))
        if saturation > 0:
            augmenters.append(SaturationJitterAug(saturation))
        super(ColorJitterAug, self).__init__(augmenters)
class LightingAug(Augmenter):
    """Add PCA based noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add random PCA-weighted color noise to `src` in place."""
        weights = np.random.normal(0, self.alphastd, size=(3,))
        noise = np.dot(self.eigvec * weights, self.eigval)
        src += nd.array(noise)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Accept None, an NDArray, or anything nd.array() can convert.
        self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)
        self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)

    def __call__(self, src):
        """Normalize `src` with the stored mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert to gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Each output channel gets the same luminance-weighted sum, so the
        # result stays 3-channel but looks gray.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """With probability `p`, replace `src` by its gray projection."""
        if random.random() < self.p:
            return nd.dot(src, self.mat)
        return src
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability to flip image horizontally
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """Flip `src` left-right with probability `p`."""
        if random.random() < self.p:
            return nd.flip(src, axis=1)
        return src
class CastAug(Augmenter):
    """Cast the image to a given dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return `src` cast to ``self.typ``."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Creates an augmenter list.

    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the beginning
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]
    std : np.ndarray or None
        Standard deviations for [r, g, b]
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations

        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method mentioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).

    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    auglist = []

    # 1. Optional shorter-edge resize, applied before any crop.
    if resize > 0:
        auglist.append(ResizeAug(resize, inter_method))

    # data_shape is (C, H, W); crops take (width, height).
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        # Random sized crop implies (and requires) random cropping.
        assert rand_crop
        auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        auglist.append(RandomCropAug(crop_size, inter_method))
    else:
        auglist.append(CenterCropAug(crop_size, inter_method))

    if rand_mirror:
        auglist.append(HorizontalFlipAug(0.5))

    # Cast to float32 before the arithmetic color augmentations below.
    auglist.append(CastAug())

    if brightness or contrast or saturation:
        auglist.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        auglist.append(HueJitterAug(hue))
    if pca_noise > 0:
        # ImageNet PCA eigenvalues/eigenvectors (AlexNet-style lighting noise).
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        auglist.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        auglist.append(RandomGrayAug(rand_gray))

    # mean=True / std=True select the standard ImageNet RGB statistics.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]

    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]

    if mean is not None or std is not None:
        auglist.append(ColorNormalizeAug(mean, std))

    return auglist
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.

    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.

    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.

    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the beginning
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """

    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # Exactly one image source is required: .rec file, .lst file, or an
        # in-memory list of (label, path) records.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                # An index file enables random access (needed for shuffling
                # and partitioning).
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        if path_imglist:
            # Parse the .lst file: <index>\t<label...>\t<relative path>.
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                for line in iter(fin.readline, ''):
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            # Build the same key -> (label, path) mapping from the in-memory
            # list; keys are stringified 1-based indices.
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root

        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # self.seq is the ordered list of sample keys, or None when reading
        # a .rec file sequentially.
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None

        if num_parts > 1:
            # Keep only this worker's contiguous slice of the sequence.
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache used by 'roll_over' to carry a partial batch into the next epoch.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # With 'roll_over' and cached partial data, keep the read position so
        # the cached samples are consumed first.
        if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
            if self._allow_read is False:
                self._allow_read = True

    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None

    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # End of epoch; rewind unless the last batch is discarded.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential .rec read (no index available).
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img

    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Skip undecodable/corrupt images instead of failing.
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            # Re-raise only if nothing was read; otherwise return the partial
            # count and let next() decide how to pad/roll over.
            if not i:
                raise StopIteration
        return i

    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad': fill the remainder by reading from the beginning.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None

        return io.DataBatch([batch_data], [batch_label], pad=pad)

    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')

    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')

    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img

    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img

    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data

    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # HWC (decode/augment layout) -> CHW (network input layout).
        return nd.transpose(datum, axes=(2, 0, 1))
|
scale_down
|
Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image file into an NDArray.

    Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>

    >>> mx.img.imread("flower.jpg", flag=0)    # grayscale
    <NDArray 224x224x1 @cpu(0)>

    >>> mx.img.imread("flower.jpg", to_rgb=0)  # BGR (OpenCV order)
    <NDArray 224x224x3 @cpu(0)>
    """
    # Thin wrapper: the decoding itself happens in the OpenCV-backed C++ op.
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode in-memory image data into an NDArray.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>

    >>> image = mx.img.imdecode(str_image, flag=0)    # grayscale
    >>> image
    <NDArray 224x224x1 @cpu(0)>

    >>> image = mx.img.imdecode(str_image, to_rgb=0)  # BGR (OpenCV order)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    # The backend op wants a uint8 NDArray; wrap raw bytes / numpy input once.
    if not isinstance(buf, nd.NDArray):
        buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
    """Scales down crop size if it's larger than image size.

    If width/height of the crop is larger than the width/height of the image,
    the crop is shrunk (preserving its aspect ratio) to fit inside the image.
    The implementation was missing here (masked placeholder) even though
    `random_crop` calls it, so it is restored per its documented contract.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        A tuple containing the scaled crop size in (width, height) format.

    Example
    --------
    >>> src_size = (640, 480)
    >>> size = (720, 120)
    >>> new_size = mx.img.scale_down(src_size, size)
    >>> new_size
    (640, 106)
    """
    w, h = size
    sw, sh = src_size
    # Fit the crop's height to the image first, then its width, scaling the
    # other dimension proportionally each time to keep the aspect ratio.
    if sh < h:
        w, h = float(w * sh) / h, sh
    if sw < w:
        w, h = sw, float(h * sw) / w
    return int(w), int(h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resizes shorter edge to size.

    Note: `resize_short` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with OpenCV for `resize_short` to work.

    The shorter edge is set to `size` and the longer edge is scaled by the
    same factor, preserving the aspect ratio. Resizing is done by OpenCV.

    Parameters
    ----------
    src : NDArray
        The original image.
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method used for resizing the image.

        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method mentioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).
        More details can be found in the documentation of OpenCV, please refer to
        http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.

    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.
    """
    h, w, _ = src.shape
    if w < h:
        # Width is the shorter edge; scale height proportionally.
        new_w, new_h = size, size * h // w
    else:
        new_w, new_h = size * w // h, size
    method = _get_interp_method(interp, (h, w, new_h, new_w))
    return imresize(src, new_w, new_h, interp=method)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop src at fixed location, and (optionally) resize it to size.

    Parameters
    ----------
    src : NDArray
        Input image
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    end = (y0 + h, x0 + w, int(src.shape[2]))
    patch = nd.crop(src, begin=(y0, x0, 0), end=end)
    # Skip the resize when no target size is given or the crop already matches.
    if size is None or (w, h) == size:
        return patch
    method = _get_interp_method(interp, (h, w, size[1], size[0]))
    return imresize(patch, *size, interp=method)
def random_crop(src, size, interp=2):
    """Randomly crop `src` with `size` (width, height).

    The crop is first scaled down with `scale_down` so it fits inside the
    image; the result is then upsampled back to `size` by `fixed_crop`.

    Parameters
    ----------
    src : NDArray
        Source image.
    size : tuple of (int, int)
        Size of the crop formatted as (width, height). If the `size` is larger
        than the image, then the source image is upsampled to `size` and returned.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is the top-left position of
        the crop in the original image and (width, height) its dimensions.

    Example
    -------
    >>> im = mx.nd.array(cv2.imread("flower.jpg"))
    >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    >>> print cropped_im
    <NDArray 100x100x1 @cpu(0)>
    >>> print rect
    (20, 21, 100, 100)
    """
    height, width, _ = src.shape
    crop_w, crop_h = scale_down((width, height), size)
    # Pick a uniformly random top-left corner that keeps the crop in bounds.
    left = random.randint(0, width - crop_w)
    top = random.randint(0, height - crop_h)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop `src` to `size`, keeping the center of the image.

    The crop is trimmed equally on all four sides; if `src` is smaller than
    `size` the result is upsampled.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height) where x, y are the positions of the crop in the
        original image and width, height the dimensions of the crop.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.image.imdecode(str_image)
    >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
    >>> cropped_image
    <NDArray 500x1000x3 @cpu(0)>
    """
    height, width, _ = src.shape
    crop_w, crop_h = scale_down((width, height), size)
    # Center the crop: split the leftover margin evenly on both sides.
    left = int((width - crop_w) / 2)
    top = int((height - crop_h) / 2)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize `src` in place by subtracting `mean` and dividing by `std`.

    Either statistic may be `None`, in which case that step is skipped.

    Parameters
    ----------
    src : NDArray
        Input image.
    mean : NDArray or None
        RGB mean to be subtracted.
    std : NDArray or None
        RGB standard deviation to be divided.

    Returns
    -------
    NDArray
        An `NDArray` containing the normalized image.
    """
    if mean is not None:
        src -= mean
    if std is None:
        return src
    src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop `src`, randomizing both the crop area and aspect ratio.

    Up to ten random (area, aspect-ratio) proposals are tried; if none fits
    inside the image a center crop is used instead.

    Parameters
    ----------
    src : NDArray
        Input image.
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping.
        If float, minimum area to be maintained after cropping; maximum is 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is the top-left position of
        the crop in the original image and (width, height) its dimensions.
    """
    height, width, _ = src.shape
    full_area = height * width
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Try up to ten random proposals before giving up.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * full_area
        aspect = random.uniform(*ratio)
        crop_w = int(round(np.sqrt(target_area * aspect)))
        crop_h = int(round(np.sqrt(target_area / aspect)))
        # Randomly swap width/height to cover both orientations.
        if random.random() < 0.5:
            crop_h, crop_w = crop_w, crop_h
        if crop_w <= width and crop_h <= height:
            left = random.randint(0, width - crop_w)
            top = random.randint(0, height - crop_h)
            cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
            return cropped, (left, top, crop_w, crop_h)
    # No proposal fit inside the image; fall back to a deterministic center crop.
    return center_crop(src, size, interp)
class Augmenter(object):
    """Image Augmenter base class.

    Constructor keyword arguments are recorded (NDArray/ndarray values are
    converted to plain lists) so that `dumps` can serialize the augmenter.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Normalize array-valued kwargs to plain lists so json.dumps works.
        for name, val in self._kwargs.items():
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[name] = val

    def dumps(self):
        """Serialize this augmenter to a JSON formatted string.

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied one after another.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply every augmenter in order to `src` and return the result."""
        out = src
        for transform in self.ts:
            out = transform(out)
        return out
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge of the image to `size`.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` so its shorter edge equals `self.size`."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to `size` regardless of aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` to exactly `self.size`, ignoring aspect ratio."""
        old_h, old_w = src.shape[0], src.shape[1]
        method = _get_interp_method(self.interp, (old_h, old_w, self.size[1], self.size[0]))
        return imresize(src, *self.size, interp=method)
class RandomCropAug(Augmenter):
    """Augmenter that crops a random region of the given size.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops randomly with random area and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping.
        If float, minimum area to be maintained after cropping; maximum is 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Accept the deprecated `min_area` spelling but warn about it.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            area = kwargs.pop('min_area')
        self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop of the given size.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order.
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply the augmenters to `src` in a freshly randomized order.

        A shuffled copy of ``self.ts`` is used: the original code shuffled
        ``self.ts`` in place, which permanently reordered the caller-supplied
        list and changed the output of ``dumps()`` after every call.
        """
        order = self.ts[:]
        random.shuffle(order)
        for t in order:
            src = t(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1].
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale `src` in place by a factor drawn from [1-b, 1+b]."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1].
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # Per-channel luminance weights used to compute the gray offset.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` toward its mean luminance by a random contrast factor."""
        factor = 1.0 + random.uniform(-self.contrast, self.contrast)
        luma = src * self.coef
        # Scalar offset proportional to mean luminance (3/size compensates
        # for the three channels in the weighted sum).
        offset = (3.0 * (1.0 - factor) / luma.size) * nd.sum(luma)
        src *= factor
        src += offset
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1].
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # Per-channel luminance weights used to build the gray image.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` toward its per-pixel gray value by a random factor."""
        factor = 1.0 + random.uniform(-self.saturation, self.saturation)
        gray = src * self.coef
        gray = nd.sum(gray, axis=2, keepdims=True)
        gray *= (1.0 - factor)
        src *= factor
        src += gray
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1].
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ matrix and its inverse; hue rotation is applied in YIQ.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])

    def __call__(self, src):
        """Rotate hue by a random angle via an approximate linear transform.

        Uses the approximation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        theta = random.uniform(-self.hue, self.hue)
        u = np.cos(theta * np.pi)
        w = np.sin(theta * np.pi)
        # Rotation about the Y (luminance) axis in YIQ space.
        rot = np.array([[1.0, 0.0, 0.0],
                        [0.0, u, -w],
                        [0.0, w, u]])
        transform = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
        src = nd.dot(src, nd.array(transform))
        return src
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1].
    contrast : float
        The contrast jitter ratio range, [0, 1].
    saturation : float
        The saturation jitter ratio range, [0, 1].
    """
    def __init__(self, brightness, contrast, saturation):
        # Only instantiate the jitters whose strength is positive.
        specs = ((brightness, BrightnessJitterAug),
                 (contrast, ContrastJitterAug),
                 (saturation, SaturationJitterAug))
        ts = [aug_cls(strength) for strength, aug_cls in specs if strength > 0]
        super(ColorJitterAug, self).__init__(ts)
class LightingAug(Augmenter):
    """Add PCA-based noise to the image.

    Parameters
    ----------
    alphastd : float
        Noise level.
    eigval : 3x1 np.array
        Eigen values.
    eigvec : 3x3 np.array
        Eigen vectors.
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add a per-call random PCA noise vector to every pixel of `src`."""
        weights = np.random.normal(0, self.alphastd, size=(3,))
        noise = np.dot(self.eigvec * weights, self.eigval)
        src += nd.array(noise)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization augmenter.

    Parameters
    ----------
    mean : NDArray or None
        RGB mean to be subtracted.
    std : NDArray or None
        RGB standard deviation to be divided.
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)

        def _as_ndarray(stat):
            # Pass None/NDArray through; convert anything else to NDArray.
            if stat is None or isinstance(stat, nd.NDArray):
                return stat
            return nd.array(stat)

        self.mean = _as_ndarray(mean)
        self.std = _as_ndarray(std)

    def __call__(self, src):
        """Normalize `src` with the stored mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert the image to a 3-channel gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale.
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Projection matrix replicating the gray value into all 3 channels.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """With probability `p`, project `src` onto its gray equivalent."""
        if random.random() >= self.p:
            return src
        return nd.dot(src, self.mat)
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip augmenter.

    Parameters
    ----------
    p : float
        Probability to flip the image horizontally.
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """Flip `src` left-right with probability `p`."""
        if random.random() >= self.p:
            return src
        return nd.flip(src, axis=1)
class CastAug(Augmenter):
    """Cast the image to a target dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return `src` cast to `self.typ`."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Create a list of augmenters matching the requested options.

    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data in (channels, height, width) format.
    resize : int
        Resize shorter edge if larger than 0 at the beginning.
    rand_crop : bool
        Whether to enable random cropping other than center crop.
    rand_resize : bool
        Whether to enable random sized cropping; requires rand_crop to be enabled.
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels; the number
        of channels will not be reduced to 1.
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5.
    mean : np.ndarray, True or None
        Mean pixel values for [r, g, b]; `True` selects the ImageNet defaults.
    std : np.ndarray, True or None
        Standard deviations for [r, g, b]; `True` selects the ImageNet defaults.
    brightness : float
        Brightness jittering range (percent).
    contrast : float
        Contrast jittering range (percent).
    saturation : float
        Saturation jittering range (percent).
    hue : float
        Hue jittering range (percent).
    pca_noise : float
        PCA noise level (percent).
    inter_method : int, default=2 (Area-based)
        Interpolation method for all resizing operations.
        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others.
        10: Random select from the interpolation methods mentioned above.

    Returns
    -------
    list of Augmenter
        The augmenters, in the order they should be applied.

    Examples
    --------
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> for aug in augs:
    ...    aug.dumps()
    """
    augmenters = []
    if resize > 0:
        augmenters.append(ResizeAug(resize, inter_method))
    # Exactly one cropping strategy is always included.
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        assert rand_crop
        augmenters.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        augmenters.append(RandomCropAug(crop_size, inter_method))
    else:
        augmenters.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        augmenters.append(HorizontalFlipAug(0.5))
    # Cast to float before any arithmetic color transforms.
    augmenters.append(CastAug())
    if brightness or contrast or saturation:
        augmenters.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        augmenters.append(HueJitterAug(hue))
    if pca_noise > 0:
        # ImageNet PCA statistics.
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        augmenters.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        augmenters.append(RandomGrayAug(rand_gray))
    # `True` selects the conventional ImageNet RGB statistics.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        augmenters.append(ColorNormalizeAug(mean, std))
    return augmenters
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.
    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.
    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the begining
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """

    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # At least one data source must be provided: a .rec file, a .lst file,
        # or an in-memory image list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        # NOTE(review): os.environ.get returns a str when the variable is set;
        # the value is only interpolated into log messages here.
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        # --- RecordIO source: indexed (supports shuffle/partition) or plain ---
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        # --- Label source: .lst file or in-memory list; builds key -> (label, path) ---
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                for line in iter(fin.readline, ''):
                    # .lst format: index \t label(s)... \t relative_path
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                # Each item is [label(s)..., path]; labels may be scalar or list.
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root
        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # `seq` is the ordered list of sample keys; None means sequential
        # reads straight from the (non-indexed) record file.
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        # Keep only this worker's contiguous slice when partitioned.
        if num_parts > 1:
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Partial-batch cache used only by the 'roll_over' policy.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # With 'roll_over' and a cached partial batch, keep the read position
        # so the cached samples are completed on the next pass.
        if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
            if self._allow_read is False:
                self._allow_read = True

    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        # Drop any cached roll-over batch.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None

    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # Exhausted: rewind (unless discarding) and signal end of epoch.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential read from a non-indexed record file.
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img

    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Skip undecodable/corrupt images instead of failing the batch.
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # Returns the number of slots actually filled (may be < batch_size).
        return i

    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad': top the batch up by wrapping around to the start.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)

    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')

    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')

    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            # self.cur was already advanced past the failing sample.
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img

    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img

    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data

    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # HWC -> CHW, the layout expected by provide_data.
        return nd.transpose(datum, axes=(2, 0, 1))
|
def scale_down(src_size, size):
    """Clamp a crop size so it fits inside an image, preserving aspect ratio.

    If the requested crop is wider or taller than the image, the crop is
    shrunk proportionally so that it fits.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        A tuple containing the scaled crop size in (width, height) format.

    Example
    --------
    >>> src_size = (640,480)
    >>> size = (720,120)
    >>> new_size = mx.img.scale_down(src_size, size)
    >>> new_size
    (640,106)
    """
    crop_w, crop_h = size
    img_w, img_h = src_size
    if img_h < crop_h:
        # Crop too tall: pin height to the image, shrink width proportionally.
        crop_w, crop_h = float(crop_w * img_h) / crop_h, img_h
    if img_w < crop_w:
        # Crop too wide: pin width to the image, shrink height proportionally.
        crop_w, crop_h = img_w, float(crop_h * img_w) / crop_w
    return int(crop_w), int(crop_h)
| 140 | 172 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image file into an NDArray.

    Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imread` to work.

    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    >>> mx.img.imread("flower.jpg", flag=0)    # grayscale
    <NDArray 224x224x1 @cpu(0)>
    >>> mx.img.imread("flower.jpg", to_rgb=0)  # BGR (OpenCV channel order)
    <NDArray 224x224x3 @cpu(0)>
    """
    # Thin wrapper: all decoding happens in the C++ backend.
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an image from raw bytes into an NDArray.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : bytes, NDArray or numpy.ndarray
        Binary image data. A unicode `str` is rejected explicitly because it
        cannot be interpreted as raw bytes; read the file in binary mode
        (or encode the string) instead.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Raises
    ------
    ValueError
        If `buf` is a unicode `str` rather than raw bytes.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    Set `flag` parameter to 0 to get grayscale output
    >>> image = mx.img.imdecode(str_image, flag=0)
    >>> image
    <NDArray 224x224x1 @cpu(0)>
    Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
    >>> image = mx.img.imdecode(str_image, to_rgb=0)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    # Fail fast with a clear message: np.frombuffer on a unicode str raises a
    # confusing TypeError instead of explaining that raw bytes are required.
    if isinstance(buf, str):
        raise ValueError('buf must be of type bytes or numpy.ndarray, '
                         'if you would like to input type str, please '
                         'convert to bytes')
    if not isinstance(buf, nd.NDArray):
        buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
    """Clamp a crop size so it fits inside an image, keeping its aspect ratio.

    If the crop's width/height exceeds the image's width/height, the crop is
    shrunk (proportionally in the other dimension) until it fits.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The scaled crop size in (width, height) format.

    Example
    --------
    >>> src_size = (640,480)
    >>> size = (720,120)
    >>> new_size = mx.img.scale_down(src_size, size)
    >>> new_size
    (640,106)
    """
    crop_w, crop_h = size
    img_w, img_h = src_size
    # Shrink to fit the image height first, then the image width; each step
    # rescales the other dimension to preserve the crop's aspect ratio.
    if img_h < crop_h:
        crop_w, crop_h = float(crop_w * img_h) / crop_h, img_h
    if img_w < crop_w:
        crop_w, crop_h = img_w, float(crop_h * img_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize the image so that its shorter edge equals `size`.

    Note: `resize_short` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with OpenCV for `resize_short` to work.

    The longer edge is scaled proportionally so the aspect ratio is kept.

    Parameters
    ----------
    src : NDArray
        The original image.
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method used for resizing the image.
        See `_get_interp_method` for the meaning of each code; 9 picks
        automatically based on the resize direction, 10 picks at random.

    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.

    Example
    -------
    >>> with open("flower.jpeg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    height, width, _ = src.shape
    if width < height:
        # Width is the shorter edge; scale the height proportionally.
        target_w, target_h = size, size * height // width
    else:
        target_w, target_h = size * width // height, size
    method = _get_interp_method(interp, (height, width, target_h, target_w))
    return imresize(src, target_w, target_h, interp=method)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop src at a fixed location, and (optionally) resize it to `size`.

    Parameters
    ----------
    src : NDArray
        Input image
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    channels = int(src.shape[2])
    cropped = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, channels))
    # Skip the resize when no target size is given or the crop already matches.
    if size is None or (w, h) == size:
        return cropped
    method = _get_interp_method(interp, (h, w, size[1], size[0]))
    return imresize(cropped, *size, interp=method)
def random_crop(src, size, interp=2):
    """Randomly crop `src` with `size` (width, height).

    The crop is first clamped via `scale_down`, so if `src` is smaller than
    `size` the result is upsampled to `size`.

    Parameters
    ----------
    src: Source image `NDArray`
    size: Size of the crop formatted as (width, height). If the `size` is larger
        than the image, then the source image is upsampled to `size` and returned.
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.

    Example
    -------
    >>> im = mx.nd.array(cv2.imread("flower.jpg"))
    >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    """
    height, width, _ = src.shape
    crop_w, crop_h = scale_down((width, height), size)
    # Draw the top-left corner uniformly over all valid positions
    # (x is drawn first, then y, matching the historical RNG call order).
    left = random.randint(0, width - crop_w)
    top = random.randint(0, height - crop_h)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop the image `src` to `size`, trimming equally on all four sides so
    the center of the image is preserved. Upsamples if `src` is smaller
    than `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height) where x, y are the positions of the crop in the
        original image and width, height the dimensions of the crop.

    Example
    -------
    >>> image = mx.image.imdecode(str_image)
    >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
    """
    height, width, _ = src.shape
    crop_w, crop_h = scale_down((width, height), size)
    # Center the (possibly clamped) crop inside the image.
    left = int((width - crop_w) / 2)
    top = int((height - crop_h) / 2)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize `src` in place by subtracting `mean` and dividing by `std`.

    Either argument may be ``None`` to skip that step.

    Parameters
    ----------
    src : NDArray
        Input image
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided

    Returns
    -------
    NDArray
        An `NDArray` containing the normalized image (the same buffer as
        `src`, which is modified in place).
    """
    if mean is not None:
        src -= mean
    if std is None:
        return src
    src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with size. Randomize area and aspect ratio.

    Draws up to 10 candidate crops (random area and aspect ratio); the first
    one that fits inside the image is used. If none fits, falls back to a
    center crop.

    Parameters
    ----------
    src : NDArray
        Input image
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.
    """
    h, w, _ = src.shape
    src_area = h * w
    # Backward compatibility: `min_area` was the old name for `area`.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Rejection-sample candidate crops: each draws a target area fraction and
    # an aspect ratio, then checks that the implied crop fits in the image.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * src_area
        new_ratio = random.uniform(*ratio)
        new_w = int(round(np.sqrt(target_area * new_ratio)))
        new_h = int(round(np.sqrt(target_area / new_ratio)))
        # Swap width/height half of the time so both orientations are sampled.
        if random.random() < 0.5:
            new_h, new_w = new_w, new_h
        if new_w <= w and new_h <= h:
            x0 = random.randint(0, w - new_w)
            y0 = random.randint(0, h - new_h)
            out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
            return out, (x0, y0, new_w, new_h)
    # fall back to center_crop
    return center_crop(src, size, interp)
class Augmenter(object):
    """Base class for image augmenters.

    Stores the constructor keyword arguments (converted to plain Python
    values) so that any augmenter can be serialized via ``dumps``.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Normalize NDArray / numpy values to plain lists so the stored
        # kwargs are JSON-serializable.
        for key, val in self._kwargs.items():
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[key] = val

    def dumps(self):
        """Saves the Augmenter to string

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        name = self.__class__.__name__.lower()
        return json.dumps([name, self._kwargs])

    def __call__(self, src):
        """Abstract augmentation body; subclasses must override."""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied one after another.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply each augmenter in order and return the final result."""
        out = src
        for aug in self.ts:
            out = aug(out)
        return out
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge of the image to ``size``.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize ``src`` so its shorter edge equals ``self.size``."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height)
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize ``src`` to exactly ``self.size`` (width, height)."""
        old_h, old_w = src.shape[0], src.shape[1]
        method = _get_interp_method(self.interp, (old_h, old_w, self.size[1], self.size[0]))
        return imresize(src, *self.size, interp=method)
class RandomCropAug(Augmenter):
    """Augmenter that takes a random crop of the image.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return a random crop of ``src`` (the crop rectangle is discarded)."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops randomly with random area and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        self.area = area
        # Backward compatibility: `min_area` was the old name for `area`.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            self.area = kwargs.pop('min_area')
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return a random-size crop of ``src`` (the crop rectangle is discarded)."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop of the image.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return the center crop of ``src`` (the crop rectangle is discarded)."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Shuffle the augmenter list (in place) and apply each augmenter."""
        # NOTE: shuffles self.ts itself, so the stored order changes per call.
        random.shuffle(self.ts)
        out = src
        for aug in self.ts:
            out = aug(out)
        return out
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale pixel values in place by a random factor in [1-b, 1+b]."""
        src *= 1.0 + random.uniform(-self.brightness, self.brightness)
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # Per-channel luminance weights (ITU-R BT.601).
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Scale ``src`` in place around its mean luminance."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        gray = src * self.coef
        gray = (3.0 * (1.0 - scale) / gray.size) * nd.sum(gray)
        src *= scale
        src += gray
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # Per-channel luminance weights (ITU-R BT.601).
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend ``src`` in place toward/away from its per-pixel luminance."""
        scale = 1.0 + random.uniform(-self.saturation, self.saturation)
        gray = src * self.coef
        gray = nd.sum(gray, axis=2, keepdims=True)
        gray *= (1.0 - scale)
        src *= scale
        src += gray
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ and YIQ -> RGB transform matrices.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])

    def __call__(self, src):
        """Rotate the hue by a random angle in [-hue*pi, hue*pi].

        Uses the approximate linear transformation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        shift = random.uniform(-self.hue, self.hue)
        vsu = np.cos(shift * np.pi)
        vsw = np.sin(shift * np.pi)
        # Rotation about the luminance axis in YIQ space.
        rot = np.array([[1.0, 0.0, 0.0],
                        [0.0, vsu, -vsw],
                        [0.0, vsw, vsu]])
        transform = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
        return nd.dot(src, nd.array(transform))
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, brightness, contrast, saturation):
        # Only include the jitters whose range is non-zero.
        augs = []
        if brightness > 0:
            augs.append(BrightnessJitterAug(brightness))
        if contrast > 0:
            augs.append(ContrastJitterAug(contrast))
        if saturation > 0:
            augs.append(SaturationJitterAug(saturation))
        super(ColorJitterAug, self).__init__(augs)
class LightingAug(Augmenter):
    """Add PCA based noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add a random per-channel offset along the PCA directions (in place)."""
        alpha = np.random.normal(0, self.alphastd, size=(3,))
        offset = np.dot(self.eigvec * alpha, self.eigval)
        src += nd.array(offset)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization augmenter.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Accept None, NDArray, or anything nd.array can convert (list/ndarray).
        if mean is None or isinstance(mean, nd.NDArray):
            self.mean = mean
        else:
            self.mean = nd.array(mean)
        if std is None or isinstance(std, nd.NDArray):
            self.std = std
        else:
            self.std = nd.array(std)

    def __call__(self, src):
        """Normalize ``src`` in place with the stored mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert to gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Each output channel receives the same luminance-weighted mix,
        # so the image stays 3-channel but looks gray.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """With probability ``p``, project channels onto the gray weights."""
        if random.random() < self.p:
            return nd.dot(src, self.mat)
        return src
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability to flip image horizontally
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """With probability ``p``, mirror ``src`` along the width axis."""
        if random.random() < self.p:
            return nd.flip(src, axis=1)
        return src
class CastAug(Augmenter):
    """Cast the image to a given dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return ``src`` cast to ``self.typ``."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Build the standard list of augmenters from individual options.

    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]; pass True for the ImageNet defaults
    std : np.ndarray or None
        Standard deviations for [r, g, b]; pass True for the ImageNet defaults
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations.
        See resize_short for the meaning of each code; 9 selects automatically,
        10 selects at random.

    Examples
    --------
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> for aug in augs:
    ...    aug.dumps()
    """
    augmenters = []
    if resize > 0:
        augmenters.append(ResizeAug(resize, inter_method))
    # data_shape is (channels, height, width); crops take (width, height).
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        assert rand_crop
        augmenters.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        augmenters.append(RandomCropAug(crop_size, inter_method))
    else:
        augmenters.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        augmenters.append(HorizontalFlipAug(0.5))
    # Cast to float32 before any arithmetic color transforms.
    augmenters.append(CastAug())
    if brightness or contrast or saturation:
        augmenters.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        augmenters.append(HueJitterAug(hue))
    if pca_noise > 0:
        # ImageNet PCA statistics.
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        augmenters.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        augmenters.append(RandomGrayAug(rand_gray))
    # `True` selects the conventional ImageNet statistics.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        augmenters.append(ColorNormalizeAug(mean, std))
    return augmenters
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
files, use `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB image with 3 channels is supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
dtype : str
Label data type. Default: float32. Other options: int32, int64, float64
last_batch_handle : str, optional
How to handle the last batch.
This parameter can be 'pad'(default), 'discard' or 'roll_over'.
If 'pad', the last batch will be padded with data starting from the begining
If 'discard', the last batch will be discarded
If 'roll_over', the remaining elements will be rolled over to the next iteration
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='softmax_label', dtype='float32',
last_batch_handle='pad', **kwargs):
super(ImageIter, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
logging.info('Using %s threads for decoding...', str(num_threads))
logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
' larger number to use more threads.')
class_name = self.__class__.__name__
if path_imgrec:
logging.info('%s: loading recordio %s...',
class_name, path_imgrec)
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
logging.info('%s: loading image list %s...', class_name, path_imglist)
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array(line[1:-1], dtype=dtype)
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
logging.info('%s: loading image list...', class_name)
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if len(img) > 2:
label = nd.array(img[:-1], dtype=dtype)
elif isinstance(img[0], numeric_types):
label = nd.array([img[0]], dtype=dtype)
else:
label = nd.array(img[0], dtype=dtype)
result[key] = (label, img[-1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [(label_name, (batch_size, label_width))]
else:
self.provide_label = [(label_name, (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self._allow_read = True
self.last_batch_handle = last_batch_handle
self.num_image = len(self.seq) if self.seq is not None else None
self._cache_data = None
self._cache_label = None
self._cache_idx = None
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
def next_sample(self):
"""Helper function for reading in next sample."""
if self._allow_read is False:
raise StopIteration
if self.seq is not None:
if self.cur < self.num_image:
idx = self.seq[self.cur]
else:
if self.last_batch_handle != 'discard':
self.cur = 0
raise StopIteration
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
if self.last_batch_handle != 'discard':
self.imgrec.reset()
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def _batchify(self, batch_data, batch_label, start=0):
"""Helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return i
def next(self):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
# clear the cache data
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch.

        Converts the decoded image from HWC (height, width, channel) layout
        to the CHW layout declared by `provide_data`.
        """
        return nd.transpose(datum, axes=(2, 0, 1))
fixed_crop

Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read an image file and decode it into an NDArray.

    Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imread` to work.

    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    >>> mx.img.imread("flower.jpg", flag=0)      # grayscale
    <NDArray 224x224x1 @cpu(0)>
    >>> mx.img.imread("flower.jpg", to_rgb=0)    # OpenCV BGR layout
    <NDArray 224x224x3 @cpu(0)>
    """
    # Delegate straight to the OpenCV-backed C++ operator.
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an in-memory image buffer into an NDArray.

    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>

    Pass ``flag=0`` for grayscale output, ``to_rgb=0`` for OpenCV (BGR)
    channel ordering.
    """
    if isinstance(buf, nd.NDArray):
        raw = buf
    else:
        # Wrap raw bytes (or anything exposing the buffer protocol)
        # as a uint8 NDArray before handing it to the C++ decoder.
        raw = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(raw, *args, **kwargs)
def scale_down(src_size, size):
    """Scales down crop size if it's larger than image size.

    If width/height of the crop is larger than the width/height of the image,
    shrinks the crop (keeping its aspect ratio) until it fits.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The scaled crop size in (width, height) format.

    Example
    --------
    >>> mx.img.scale_down((640, 480), (720, 120))
    (640, 106)
    """
    crop_w, crop_h = size
    img_w, img_h = src_size
    # Shrink proportionally along whichever axis overflows the image.
    if crop_h > img_h:
        crop_w, crop_h = float(crop_w * img_h) / crop_h, img_h
    if crop_w > img_w:
        crop_w, crop_h = img_w, float(crop_h * img_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resizes shorter edge to size.

    Note: `resize_short` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with OpenCV for `resize_short` to work.

    Sets the shorter edge to `size` and scales the longer edge to keep the
    original aspect ratio. The resize itself is done by OpenCV.

    Parameters
    ----------
    src : NDArray
        The original image.
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method used for resizing the image.
        Possible values: 0 (nearest), 1 (bilinear), 2 (area, default),
        3 (bicubic), 4 (Lanczos), 9 (auto: cubic for enlarge, area for
        shrink, bilinear otherwise), 10 (random choice of 0-4).
        See http://docs.opencv.org/master/da/d54/group__imgproc__transform.html
        and `_get_interp_method` for details.

    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.

    Example
    -------
    >>> with open("flower.jpeg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    h, w, _ = src.shape
    # Pin the shorter side to `size`; scale the other side proportionally.
    if w < h:
        new_w, new_h = size, size * h // w
    else:
        new_w, new_h = size * w // h, size
    interp_method = _get_interp_method(interp, (h, w, new_h, new_w))
    return imresize(src, new_w, new_h, interp=interp_method)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop src at fixed location, and (optionally) resize it to size.

    Parameters
    ----------
    src : NDArray
        Input image
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    # Slice the (h, w) window across all channels of the HWC image.
    out = nd.slice(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    # Only resize when a target size is requested and differs from the crop.
    if size is not None and (w, h) != size:
        sizes = (h, w, size[1], size[0])
        out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
    return out
def random_crop(src, size, interp=2):
    """Randomly crop `src` with `size` (width, height).

    Upsamples the result if `src` is smaller than `size`.

    Parameters
    ----------
    src: Source image `NDArray`
    size: Size of the crop formatted as (width, height). If the `size` is larger
        than the image, then the source image is upsampled to `size` and returned.
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.

    Example
    -------
    >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    >>> rect
    (20, 21, 100, 100)
    """
    h, w, _ = src.shape
    # Shrink the requested crop so it fits inside the image.
    crop_w, crop_h = scale_down((w, h), size)
    left = random.randint(0, w - crop_w)
    top = random.randint(0, h - crop_h)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crops the image `src` to the given `size` by trimming on all four
    sides and preserving the center of the image. Upsamples if `src` is smaller
    than `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height) where x, y are the positions of the crop in the
        original image and width, height the dimensions of the crop.

    Example
    -------
    >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
    """
    h, w, _ = src.shape
    # Shrink the requested crop so it fits inside the image.
    crop_w, crop_h = scale_down((w, h), size)
    left = int((w - crop_w) / 2)
    top = int((h - crop_h) / 2)
    cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
    return cropped, (left, top, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize src with mean and std.

    Note: both operations are performed *in place* on `src` (augmented
    assignment), so the caller's array is mutated and also returned.

    Parameters
    ----------
    src : NDArray
        Input image
    mean : NDArray
        RGB mean to be subtracted; pass None to skip mean subtraction.
    std : NDArray
        RGB standard deviation to be divided; pass None to skip scaling.

    Returns
    -------
    NDArray
        An `NDArray` containing the normalized image (same object as `src`).
    """
    if mean is not None:
        src -= mean
    if std is not None:
        src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with size. Randomize area and aspect ratio.

    Parameters
    ----------
    src : NDArray
        Input image
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.
    """
    h, w, _ = src.shape
    src_area = h * w
    # Accept the legacy `min_area` keyword but steer users to `area`.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Try up to 10 random (area, aspect) proposals before giving up.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * src_area
        aspect = random.uniform(*ratio)
        crop_w = int(round(np.sqrt(target_area * aspect)))
        crop_h = int(round(np.sqrt(target_area / aspect)))
        if random.random() < 0.5:
            crop_h, crop_w = crop_w, crop_h
        if crop_w <= w and crop_h <= h:
            left = random.randint(0, w - crop_w)
            top = random.randint(0, h - crop_h)
            cropped = fixed_crop(src, left, top, crop_w, crop_h, size, interp)
            return cropped, (left, top, crop_w, crop_h)
    # fall back to center_crop
    return center_crop(src, size, interp)
class Augmenter(object):
    """Image Augmenter base class.

    Stores the constructor keyword arguments (converted to plain Python
    values) so the augmenter can be serialized by `dumps`.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Convert NDArray/ndarray values to plain lists so they are
        # JSON-serializable in dumps().
        for name in list(self._kwargs):
            val = self._kwargs[name]
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[name] = val

    def dumps(self):
        """Saves the Augmenter to string

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Composing a sequential augmenter list.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply each child augmenter to `src` in order."""
        out = src
        for aug in self.ts:
            out = aug(out)
        return out
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge to `size`.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize the shorter edge of `src` to the configured size."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to `size` regardless of aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height)
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize `src` to exactly (width, height) = self.size."""
        # (old_h, old_w, new_h, new_w) for auto interp selection.
        shape_info = (src.shape[0], src.shape[1], self.size[1], self.size[0])
        return imresize(src, *self.size,
                        interp=_get_interp_method(self.interp, shape_info))
class RandomCropAug(Augmenter):
    """Augmenter that performs a random crop of `size`.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops with random size and random aspect ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Accept the legacy `min_area` keyword but steer users to `area`.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            self.area = kwargs.pop('min_area')
        else:
            self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Augmenter that performs a center crop of `size`.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Shuffle the augmenter list in place, then apply each one."""
        random.shuffle(self.ts)
        out = src
        for aug in self.ts:
            out = aug(out)
        return out
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale `src` in place by a random factor in [1-b, 1+b]."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # ITU-R BT.601 luma coefficients for the RGB -> gray projection.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` (in place) with its mean gray level by a random factor."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        gray = src * self.coef
        # Mean luma, pre-scaled so src*scale + gray preserves overall brightness.
        gray = (3.0 * (1.0 - scale) / gray.size) * nd.sum(gray)
        src *= scale
        src += gray
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # ITU-R BT.601 luma coefficients for the RGB -> gray projection.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend `src` (in place) toward its per-pixel gray by a random factor."""
        scale = 1.0 + random.uniform(-self.saturation, self.saturation)
        gray = src * self.coef
        gray = nd.sum(gray, axis=2, keepdims=True)
        gray *= (1.0 - scale)
        src *= scale
        src += gray
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ transform and its inverse.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])

    def __call__(self, src):
        """Augmenter body.

        Using approximate linear transfomation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        angle = random.uniform(-self.hue, self.hue)
        cos_a = np.cos(angle * np.pi)
        sin_a = np.sin(angle * np.pi)
        # Rotation about the Y (luma) axis in YIQ space.
        rot = np.array([[1.0, 0.0, 0.0],
                        [0.0, cos_a, -sin_a],
                        [0.0, sin_a, cos_a]])
        transform = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
        return nd.dot(src, nd.array(transform))
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, brightness, contrast, saturation):
        # Only instantiate the jitters that are actually enabled (> 0).
        transforms = []
        if brightness > 0:
            transforms.append(BrightnessJitterAug(brightness))
        if contrast > 0:
            transforms.append(ContrastJitterAug(contrast))
        if saturation > 0:
            transforms.append(SaturationJitterAug(saturation))
        super(ColorJitterAug, self).__init__(transforms)
class LightingAug(Augmenter):
    """Add PCA based noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add a random per-channel offset (in place) along the PCA axes."""
        noise = np.random.normal(0, self.alphastd, size=(3,))
        rgb_shift = np.dot(self.eigvec * noise, self.eigval)
        src += nd.array(rgb_shift)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Coerce sequences to NDArray once; keep None/NDArray as-is.
        if mean is None or isinstance(mean, nd.NDArray):
            self.mean = mean
        else:
            self.mean = nd.array(mean)
        if std is None or isinstance(std, nd.NDArray):
            self.std = std
        else:
            self.std = nd.array(std)

    def __call__(self, src):
        """Normalize `src` in place with the configured mean/std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert to gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Projects RGB onto a single luma value replicated across 3 channels.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """With probability `p`, replace `src` with its grayscale version."""
        if random.random() < self.p:
            return nd.dot(src, self.mat)
        return src
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability to flip image horizontally
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """With probability `p`, flip `src` along the width axis."""
        if random.random() < self.p:
            return nd.flip(src, axis=1)
        return src
class CastAug(Augmenter):
    """Cast the image to a given dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return `src` cast to the configured dtype."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Creates an augmenter list.

    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]. Pass True for the ImageNet defaults.
    std : np.ndarray or None
        Standard deviations for [r, g, b]. Pass True for the ImageNet defaults.
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations.
        Possible values: 0 (nearest), 1 (bilinear), 2 (area, default),
        3 (bicubic), 4 (Lanczos), 9 (auto: cubic for enlarge, area for
        shrink, bilinear otherwise), 10 (random choice from the methods
        mentioned above). See `_get_interp_method` for details.

    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    augmenters = []
    if resize > 0:
        augmenters.append(ResizeAug(resize, inter_method))

    # data_shape is CHW; crops are specified as (width, height).
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        assert rand_crop
        augmenters.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        augmenters.append(RandomCropAug(crop_size, inter_method))
    else:
        augmenters.append(CenterCropAug(crop_size, inter_method))

    if rand_mirror:
        augmenters.append(HorizontalFlipAug(0.5))

    # Color-space transforms operate on float data, so cast first.
    augmenters.append(CastAug())

    if brightness or contrast or saturation:
        augmenters.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        augmenters.append(HueJitterAug(hue))
    if pca_noise > 0:
        # ImageNet PCA statistics.
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        augmenters.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        augmenters.append(RandomGrayAug(rand_gray))

    # mean/std == True selects the standard ImageNet statistics.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        augmenters.append(ColorNormalizeAug(mean, std))

    return augmenters
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
files, use `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB image with 3 channels is supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
dtype : str
Label data type. Default: float32. Other options: int32, int64, float64
last_batch_handle : str, optional
How to handle the last batch.
This parameter can be 'pad'(default), 'discard' or 'roll_over'.
If 'pad', the last batch will be padded with data starting from the begining
If 'discard', the last batch will be discarded
If 'roll_over', the remaining elements will be rolled over to the next iteration
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
"""
    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # Exactly one data source is required: a .rec file, a .lst file, or
        # an in-memory image list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        # --- record file source -------------------------------------------
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                # Index file enables keyed access (needed for shuffle/partition).
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        # --- image list source --------------------------------------------
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                # .lst format: index \t label(s) \t relative_path
                for line in iter(fin.readline, ''):
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            # In-memory list items: [label(s), path]; keys are generated 1..N.
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root

        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # self.seq is the (possibly shuffled/partitioned) sample order;
        # None means sequential reads straight from the record file.
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        # Keep only this worker's slice for distributed training.
        if num_parts > 1:
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache for a partial batch held over when last_batch_handle='roll_over'.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
    def next_sample(self):
        """Helper function for reading in next sample.

        Returns
        -------
        tuple
            (label, image) for the next sample, where the image is the
            raw, still-encoded payload (bytes from a record or file).

        Raises
        ------
        StopIteration
            When reading has been disabled or the epoch is exhausted.
        """
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            # Iterate via the explicit index list (shuffled/partitioned order).
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # Epoch finished; rewind the cursor unless 'discard' keeps it at the end.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    # A label from the image list takes precedence over the record header.
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # No index list: read the record file sequentially.
            s = self.imgrec.read()
            if s is None:
                # End of record file; rewind unless 'discard'.
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img
    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data.

        Fills ``batch_data``/``batch_label`` in place starting at slot
        ``start``, decoding, validating and augmenting samples until the
        batch is full or the source is exhausted.

        Returns
        -------
        int
            One past the last slot filled (== batch_size when full).

        Raises
        ------
        StopIteration
            Only when not a single sample could be read for this call.
        """
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    # Corrupt/undecodable images are skipped, not fatal.
                    self.check_valid_image(data)
                except RuntimeError as e:
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            # A partly filled batch is acceptable; an empty one ends the epoch.
            if not i:
                raise StopIteration
        return i
    def next(self):
        """Returns the next batch of data.

        Implements the three ``last_batch_handle`` policies for a
        partial final batch: 'pad' refills the tail from the start of
        the data and then stops reading, 'discard' drops the partial
        batch, and 'roll_over' caches it so the next epoch completes it.
        """
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad' (or completing a rolled-over batch): wrap around and
                # fill the remaining slots from the start of the data.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    # After padding, the epoch is over: block further reads.
                    self._allow_read = False
                else:
                    # Rolled-over batch completed: clear the cache.
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details.

        Raises
        ------
        RuntimeError
            If decoding fails; the message identifies the offending image
            by filename (when an image list is available) or by index.
        """
        def locate():
            """Locate the image file/index if decode fails."""
            # self.cur was already advanced past this sample, hence the -1;
            # the modulo keeps the index valid after an epoch wrap-around.
            # NOTE(review): when seq is None, num_image is also None for pure
            # sequential .rec reads — this path would fail; confirm upstream.
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch.

        Transposes a decoded HWC image to the CHW layout declared by
        ``provide_data``.
        """
        return nd.transpose(datum, axes=(2, 0, 1))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop ``src`` at a fixed location, optionally resizing the result.

    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    depth = int(src.shape[2])
    cropped = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, depth))
    # Resize only when a target size is given and actually differs.
    if size is not None and (w, h) != size:
        old_new = (h, w, size[1], size[0])
        cropped = imresize(cropped, *size, interp=_get_interp_method(interp, old_new))
    return cropped
| 292 | 321 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image to an NDArray.
    Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    All positional and keyword arguments are forwarded unchanged to the
    C++ backend (`_cvimread`).

    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.
    Returns
    -------
    NDArray
        An `NDArray` containing the image.
    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    Set `flag` parameter to 0 to get grayscale output
    >>> mx.img.imread("flower.jpg", flag=0)
    <NDArray 224x224x1 @cpu(0)>
    Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
    >>> mx.img.imread("flower.jpg", to_rgb=0)
    <NDArray 224x224x3 @cpu(0)>
    """
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an image to an NDArray.
    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.

    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the image.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>

    Set `flag` parameter to 0 to get grayscale output

    >>> image = mx.img.imdecode(str_image, flag=0)
    >>> image
    <NDArray 224x224x1 @cpu(0)>

    Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)

    >>> image = mx.img.imdecode(str_image, to_rgb=0)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    if isinstance(buf, nd.NDArray):
        raw = buf
    else:
        # Wrap raw bytes in a uint8 NDArray before handing off to OpenCV.
        raw = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(raw, *args, **kwargs)
def scale_down(src_size, size):
    """Shrink a crop size so it fits inside an image, keeping aspect ratio.

    If the width/height of the crop is larger than the width/height of
    the image, the crop is scaled down proportionally to fit.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The scaled crop size in (width, height) format.

    Example
    --------
    >>> src_size = (640,480)
    >>> size = (720,120)
    >>> new_size = mx.img.scale_down(src_size, size)
    >>> new_size
    (640,106)
    """
    crop_w, crop_h = size
    img_w, img_h = src_size
    # Crop too tall: clamp height to the image, shrink width proportionally.
    if img_h < crop_h:
        crop_w, crop_h = float(crop_w * img_h) / crop_h, img_h
    # Crop too wide: clamp width to the image, shrink height proportionally.
    if img_w < crop_w:
        crop_w, crop_h = img_w, float(crop_h * img_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize ``src`` so that its shorter edge equals ``size``.

    Note: `resize_short` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with OpenCV for `resize_short` to work.
    The longer edge is scaled by the same factor, preserving the aspect
    ratio (with integer truncation).

    Parameters
    ----------
    src : NDArray
        The original image.
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method used for resizing the image.
        See ``_get_interp_method`` for the possible values.

    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.

    Example
    -------
    >>> with open("flower.jpeg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> size = 640
    >>> new_image = mx.img.resize_short(image, size)
    """
    h, w, _ = src.shape
    if w < h:
        # Width is the shorter edge.
        new_w, new_h = size, size * h // w
    else:
        # Height is the shorter edge (or the image is square).
        new_w, new_h = size * w // h, size
    return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop ``src`` at a fixed location, and (optionally) resize to ``size``.

    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    x0 : int
        Left boundary of the cropping area
    y0 : int
        Top boundary of the cropping area
    w : int
        Width of the cropping area
    h : int
        Height of the cropping area
    size : tuple of (w, h)
        Optional, resize to new size after cropping
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    """
    num_channels = int(src.shape[2])
    region = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, num_channels))
    # Skip the resize when no target size is given or it already matches.
    if size is not None and (w, h) != size:
        dims = (h, w, size[1], size[0])
        region = imresize(region, *size, interp=_get_interp_method(interp, dims))
    return region
def random_crop(src, size, interp=2):
    """Randomly crop ``src`` with ``size`` (width, height).

    The crop is scaled down first if it would not fit inside the image,
    and the result is upsampled to ``size`` when `src` is smaller.

    Parameters
    ----------
    src: Source image `NDArray`
    size: Size of the crop formatted as (width, height). If the `size` is larger
          than the image, then the source image is upsampled to `size` and returned.
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.

    Example
    -------
    >>> im = mx.nd.array(cv2.imread("flower.jpg"))
    >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    >>> print cropped_im
    <NDArray 100x100x1 @cpu(0)>
    >>> print rect
    (20, 21, 100, 100)
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Sample the top-left corner uniformly; x before y keeps the RNG
    # call order identical to previous versions.
    x0 = random.randint(0, w - crop_w)
    y0 = random.randint(0, h - crop_h)
    cropped = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return cropped, (x0, y0, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop ``src`` to ``size`` by trimming equally on all four sides,
    preserving the center of the image. Upsamples if `src` is smaller
    than `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height) where x, y are the positions of the crop in the
        original image and width, height the dimensions of the crop.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.image.imdecode(str_image)
    >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
    >>> cropped_image
    <NDArray 500x1000x3 @cpu(0)>
    >>> x, y, width, height
    (1241, 910, 1000, 500)
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Center the crop; the margins are non-negative after scale_down.
    x0 = (w - crop_w) // 2
    y0 = (h - crop_h) // 2
    cropped = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return cropped, (x0, y0, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize ``src`` with ``mean`` and ``std`` (in place for mutable
    array types, via augmented assignment).

    Parameters
    ----------
    src : NDArray
        Input image
    mean : NDArray or None
        RGB mean to be subtracted; skipped when None.
    std : NDArray or None
        RGB standard deviation to be divided by; skipped when None.

    Returns
    -------
    NDArray
        An `NDArray` containing the normalized image.
    """
    subtrahend = mean
    divisor = std
    if subtrahend is not None:
        src -= subtrahend
    if divisor is not None:
        src /= divisor
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with size. Randomize area and aspect ratio.

    Parameters
    ----------
    src : NDArray
        Input image
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.

    Notes
    -----
    Falls back to a center crop when no valid crop is found after 10
    random attempts (e.g. for extreme aspect ratios on small images).
    """
    h, w, _ = src.shape
    src_area = h * w
    # Backward compatibility: `min_area` was renamed to `area`.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Rejection-sample a crop whose area and aspect ratio fall in range.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * src_area
        new_ratio = random.uniform(*ratio)
        new_w = int(round(np.sqrt(target_area * new_ratio)))
        new_h = int(round(np.sqrt(target_area / new_ratio)))
        # Randomly transpose width/height so both orientations are sampled.
        if random.random() < 0.5:
            new_h, new_w = new_w, new_h
        if new_w <= w and new_h <= h:
            x0 = random.randint(0, w - new_w)
            y0 = random.randint(0, h - new_h)
            out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
            return out, (x0, y0, new_w, new_h)
    # fall back to center_crop
    return center_crop(src, size, interp)
class Augmenter(object):
    """Base class for image augmenters.

    Stores the constructor keyword arguments, converted to plain Python
    types, so the augmenter configuration can be serialized by `dumps`.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        for name, value in self._kwargs.items():
            # NDArray -> numpy -> list so the config is JSON-serializable.
            if isinstance(value, nd.NDArray):
                value = value.asnumpy()
            if isinstance(value, np.ndarray):
                value = value.tolist()
            self._kwargs[name] = value

    def dumps(self):
        """Saves the Augmenter to string

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied in sequence.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply each augmenter to ``src`` in order and return the result."""
        for stage in self.ts:
            src = stage(src)
        return src
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge to ``size``.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize ``src`` so its shorter edge equals ``self.size``."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height)
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Resize ``src`` to exactly ``self.size``."""
        old_new = (src.shape[0], src.shape[1], self.size[1], self.size[0])
        return imresize(src, *self.size, interp=_get_interp_method(self.interp, old_new))
class RandomCropAug(Augmenter):
    """Augmenter that takes a random crop of the input image.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return a random crop of ``src`` (dropping the crop rectangle)."""
        return random_crop(src, self.size, self.interp)[0]
class RandomSizedCropAug(Augmenter):
    """Augmenter that crops with random resizing and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Backward compatibility: `min_area` was renamed to `area`.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            self.area = kwargs.pop('min_area')
        else:
            self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return a random-sized crop of ``src``."""
        return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop of the input image.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return the center crop of ``src`` (dropping the crop rectangle)."""
        return center_crop(src, self.size, self.interp)[0]
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Note that the augmenter list is shuffled in place on every call.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Shuffle the augmenter list, then apply each augmenter to ``src``."""
        random.shuffle(self.ts)
        for stage in self.ts:
            src = stage(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale ``src`` in place by a random factor in [1-b, 1+b]."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # Per-channel weights used to estimate the mean gray level.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend ``src`` (in place) toward its mean gray level."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        gray = src * self.coef
        gray = (3.0 * (1.0 - scale) / gray.size) * nd.sum(gray)
        src *= scale
        src += gray
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # Per-channel weights used to compute the per-pixel gray value.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend ``src`` (in place) toward its per-pixel gray value."""
        scale = 1.0 + random.uniform(-self.saturation, self.saturation)
        gray = src * self.coef
        gray = nd.sum(gray, axis=2, keepdims=True)
        gray *= (1.0 - scale)
        src *= scale
        src += gray
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB <-> YIQ conversion matrices.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])

    def __call__(self, src):
        """Rotate the hue of ``src`` by a random angle.

        Uses the approximate linear transformation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        angle = random.uniform(-self.hue, self.hue)
        u = np.cos(angle * np.pi)
        w = np.sin(angle * np.pi)
        rotation = np.array([[1.0, 0.0, 0.0],
                             [0.0, u, -w],
                             [0.0, w, u]])
        combined = np.dot(np.dot(self.ityiq, rotation), self.tyiq).T
        src = nd.dot(src, nd.array(combined))
        return src
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, brightness, contrast, saturation):
        # Only include jitters whose range is non-zero.
        stages = []
        if brightness > 0:
            stages.append(BrightnessJitterAug(brightness))
        if contrast > 0:
            stages.append(ContrastJitterAug(contrast))
        if saturation > 0:
            stages.append(SaturationJitterAug(saturation))
        super(ColorJitterAug, self).__init__(stages)
class LightingAug(Augmenter):
    """Add PCA based noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add a random PCA-weighted RGB offset to ``src`` in place."""
        alpha = np.random.normal(0, self.alphastd, size=(3,))
        rgb = np.dot(self.eigvec * alpha, self.eigval)
        src += nd.array(rgb)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Accept None, NDArray, or anything nd.array can convert.
        self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)
        self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)

    def __call__(self, src):
        """Normalize ``src`` with the stored mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert to gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Every output channel receives the same weighted mix, so the
        # result stays 3-channel but appears gray.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """With probability ``p``, replace ``src`` by its grayscale version."""
        if random.random() < self.p:
            src = nd.dot(src, self.mat)
        return src
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability to flip image horizontally
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """With probability ``p``, mirror ``src`` along the width axis."""
        if random.random() < self.p:
            src = nd.flip(src, axis=1)
        return src
class CastAug(Augmenter):
    """Cast the image to a given dtype (float32 by default)."""
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return ``src`` cast to ``self.typ``."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Creates an augmenter list.

    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]
    std : np.ndarray or None
        Standard deviations for [r, g, b]
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations.
        See ``_get_interp_method`` for the possible values (0-4, 9 auto,
        10 random).

    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    augmenters = []
    if resize > 0:
        augmenters.append(ResizeAug(resize, inter_method))
    crop_size = (data_shape[2], data_shape[1])
    # Exactly one crop augmenter is always appended.
    if rand_resize:
        assert rand_crop
        augmenters.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        augmenters.append(RandomCropAug(crop_size, inter_method))
    else:
        augmenters.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        augmenters.append(HorizontalFlipAug(0.5))
    # Cast to float32 before any arithmetic color transforms.
    augmenters.append(CastAug())
    if brightness or contrast or saturation:
        augmenters.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        augmenters.append(HueJitterAug(hue))
    if pca_noise > 0:
        # Fixed per-channel PCA eigenvalues/eigenvectors used for lighting noise.
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        augmenters.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        augmenters.append(RandomGrayAug(rand_gray))
    # mean/std may be True (use the built-in defaults), None, or explicit arrays.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        augmenters.append(ColorNormalizeAug(mean, std))
    return augmenters
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.
    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.
    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the begining
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """
    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # Exactly one data source is required: a .rec file, a .lst file, or an in-memory list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        # NOTE(review): os.environ.get returns a string when the variable is set and the
        # int default 1 otherwise; it is only used for logging here, so that is harmless.
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        # --- record-file source: with an index we get random access (needed for
        # shuffling/partitioning); without one we can only stream sequentially.
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        # --- list source: parse the .lst file (index \t label(s) \t relative path)
        # or normalize the in-memory list into {key: (label NDArray, path)}.
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                for line in iter(fin.readline, ''):
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root
        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # self.seq is the ordered list of keys we iterate over; None means
        # "stream the record file sequentially" (no shuffle/partition possible).
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        # Distributed training: keep only this worker's contiguous slice of keys.
        if num_parts > 1:
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache slots used by the 'roll_over' last-batch policy.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()
    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # With 'roll_over' and a cached partial batch, keep the cursor so the
        # cached samples are emitted first on the next epoch.
        if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
            if self._allow_read is False:
                self._allow_read = True
    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # End of epoch: rewind the cursor (unless discarding) and stop.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    # An explicit list overrides labels stored in the record.
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential streaming from the record file.
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img
    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Skip undecodable/broken images instead of failing the batch.
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # Returns how many slots were actually filled.
        return i
    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad': fill the remainder with samples re-read from the beginning,
                # then forbid further reads until reset().
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)
    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')
    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')
    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            # self.cur was already advanced past the failing sample, hence the -1.
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img
    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img
    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data
    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # Convert HWC (decode/augment layout) to CHW (network layout).
        return nd.transpose(datum, axes=(2, 0, 1))
|
__call__
|
Augmenter body.
Using approximate linear transformation described in:
https://beesbuzz.biz/code/hsv_color_transforms.php
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image to an NDArray.
    Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB formatted output (MXNet default).
        False for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.
    Returns
    -------
    NDArray
        An `NDArray` containing the image.
    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    Set `flag` parameter to 0 to get grayscale output
    >>> mx.img.imread("flower.jpg", flag=0)
    <NDArray 224x224x1 @cpu(0)>
    Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
    >>> mx.img.imread("flower.jpg", to_rgb=0)
    <NDArray 224x224x3 @cpu(0)>
    """
    # Thin pass-through to the OpenCV-backed C operator; all keyword handling
    # (flag, to_rgb, out) happens on the backend side.
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an in-memory image buffer to an NDArray.
    Note: `imdecode` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
    Parameters
    ----------
    buf : str/bytes or numpy.ndarray
        Binary image data as string or numpy ndarray.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.
    Returns
    -------
    NDArray
        An `NDArray` containing the decoded image.
    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> image
    <NDArray 224x224x3 @cpu(0)>
    """
    # Fast path: already an NDArray, hand it straight to the backend decoder.
    if isinstance(buf, nd.NDArray):
        return _internal._cvimdecode(buf, *args, **kwargs)
    # Otherwise view the raw bytes as uint8 and wrap them without an extra copy
    # beyond the NDArray construction itself.
    raw = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(raw, *args, **kwargs)
def scale_down(src_size, size):
    """Shrink a requested crop size so that it fits inside the image.

    If the crop is wider or taller than the image, the crop is scaled
    proportionally so the offending dimension equals the image's, preserving
    the crop's aspect ratio. A crop that already fits is returned unchanged
    (truncated to int).

    Parameters
    ----------
    src_size : tuple of int
        Image size as (width, height).
    size : tuple of int
        Requested crop size as (width, height).

    Returns
    -------
    tuple of int
        The clamped crop size in (width, height) format.

    Example
    --------
    >>> scale_down((640, 480), (720, 120))
    (640, 106)
    """
    crop_w, crop_h = size
    img_w, img_h = src_size
    # Too tall: scale both dimensions down so the height matches the image.
    if img_h < crop_h:
        crop_w, crop_h = float(crop_w * img_h) / crop_h, img_h
    # Too wide (possibly after the previous adjustment): fit the width.
    if img_w < crop_w:
        crop_w, crop_h = img_w, float(crop_h * img_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize an image so its shorter edge equals `size`.
    Note: `resize_short` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with OpenCV for `resize_short` to work.
    The longer edge is scaled by the same factor, preserving aspect ratio.
    Resizing is delegated to OpenCV via `imresize`.
    Parameters
    ----------
    src : NDArray
        The original image in HWC layout.
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method used for resizing the image.
        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method mentioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).
        More details can be found in the documentation of OpenCV, please refer to
        http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.
    Example
    -------
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    h, w, _ = src.shape
    if w < h:
        # Width is the shorter edge: pin it to `size`, scale the height.
        new_h, new_w = size * h // w, size
    else:
        # Height is the shorter (or equal) edge.
        new_h, new_w = size, size * w // h
    chosen = _get_interp_method(interp, (h, w, new_h, new_w))
    return imresize(src, new_w, new_h, interp=chosen)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop `src` at a fixed location, optionally resizing the result.

    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    x0 : int
        Left boundary of the cropping area.
    y0 : int
        Top boundary of the cropping area.
    w : int
        Width of the cropping area.
    h : int
        Height of the cropping area.
    size : tuple of (w, h), optional
        If given and different from (w, h), resize the crop to this size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped (and possibly resized) image.
    """
    patch = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    # Skip the resize when no target size was requested or it already matches.
    if size is None or (w, h) == size:
        return patch
    return imresize(patch, *size, interp=_get_interp_method(interp, (h, w, size[1], size[0])))
def random_crop(src, size, interp=2):
    """Crop `src` at a uniformly random position with the given (width, height).

    If the requested crop does not fit, it is first clamped with `scale_down`
    and the crop is then upsampled back to `size` by `fixed_crop`.

    Parameters
    ----------
    src: Source image `NDArray`
    size: Size of the crop formatted as (width, height). If the `size` is larger
        than the image, then the source image is upsampled to `size` and returned.
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Same call order as before: x offset first, then y, to keep RNG streams stable.
    x0 = random.randint(0, w - crop_w)
    y0 = random.randint(0, h - crop_h)
    patch = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return patch, (x0, y0, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop the center region of `src` with the given (width, height).

    Trims evenly on all four sides; if the requested size exceeds the image,
    the crop is clamped by `scale_down` and upsampled back to `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height) where x, y are the positions of the crop in the
        original image and width, height the dimensions of the crop.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Center the crop; // 2 matches int(.../2) for the non-negative margins here.
    x0 = (w - crop_w) // 2
    y0 = (h - crop_h) // 2
    patch = fixed_crop(src, x0, y0, crop_w, crop_h, size, interp)
    return patch, (x0, y0, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize `src` in place: subtract `mean`, then divide by `std`.

    Either step is skipped when its argument is None. The input array is
    mutated and also returned.

    Parameters
    ----------
    src : NDArray
        Input image.
    mean : NDArray or None
        Per-channel mean to be subtracted.
    std : NDArray or None
        Per-channel standard deviation to be divided.

    Returns
    -------
    NDArray
        `src` itself after in-place normalization.
    """
    if mean is not None:
        src -= mean
    if std is None:
        return src
    src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with size. Randomize area and aspect ratio.
    Parameters
    ----------
    src : NDArray
        Input image
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    Returns
    -------
    NDArray
        An `NDArray` containing the cropped image.
    Tuple
        A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
        original image and (width, height) are the dimensions of the cropped image.
    """
    h, w, _ = src.shape
    src_area = h * w
    # Backward compatibility: `min_area` keyword is accepted but deprecated.
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    # A scalar area means "at least this fraction", capped at the whole image.
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Rejection sampling: try up to 10 random (area, aspect) combinations.
    for _ in range(10):
        target_area = random.uniform(area[0], area[1]) * src_area
        new_ratio = random.uniform(*ratio)
        new_w = int(round(np.sqrt(target_area * new_ratio)))
        new_h = int(round(np.sqrt(target_area / new_ratio)))
        # Swap width/height half the time so both orientations are sampled
        # even when the ratio range is one-sided.
        if random.random() < 0.5:
            new_h, new_w = new_w, new_h
        if new_w <= w and new_h <= h:
            x0 = random.randint(0, w - new_w)
            y0 = random.randint(0, h - new_h)
            out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
            return out, (x0, y0, new_w, new_h)
    # fall back to center_crop
    return center_crop(src, size, interp)
class Augmenter(object):
    """Base class for all image augmenters.

    Subclasses pass their configuration through ``**kwargs`` so that
    ``dumps`` can serialize the augmenter as JSON.
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Convert NDArray/ndarray parameters to plain lists so the stored
        # configuration stays JSON-serializable.
        for name, val in self._kwargs.items():
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[name] = val
    def dumps(self):
        """Serialize this augmenter to a JSON string.

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])
    def __call__(self, src):
        """Apply the augmentation to `src`; subclasses must override."""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose a list of augmenters applied in fixed order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in sequential order.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts
    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [step.dumps() for step in self.ts]]
    def __call__(self, src):
        """Run every contained augmenter over `src`, in order."""
        for step in self.ts:
            src = step(src)
        return src
class ResizeAug(Augmenter):
    """Augmenter that resizes the shorter edge of an image to `size`.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp
    def __call__(self, src):
        """Resize `src` so its shorter edge equals the configured size."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Augmenter that resizes to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as in (width, height)
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp
    def __call__(self, src):
        """Stretch `src` to exactly (width, height) = self.size."""
        old_h, old_w = src.shape[0], src.shape[1]
        new_w, new_h = self.size
        chosen = _get_interp_method(self.interp, (old_h, old_w, new_h, new_w))
        return imresize(src, new_w, new_h, interp=chosen)
class RandomCropAug(Augmenter):
    """Augmenter that takes a random crop of the given size.

    Parameters
    ----------
    size : int
        The length to be set for the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp
    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Augmenter performing a random-area, random-aspect-ratio crop.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum area and maximum area to be maintained after cropping
        If float, minimum area to be maintained after cropping, maximum area is set to 1.0
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
    interp: int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Accept the deprecated `min_area` spelling for backward compatibility.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            self.area = kwargs.pop('min_area')
        else:
            self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."
    def __call__(self, src):
        """Return only the cropped image from random_size_crop."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Augmenter that takes a center crop of the given size.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp
    def __call__(self, src):
        """Return only the cropped image, discarding the crop rectangle."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in a random order on every call.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order
    """
    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts
    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [step.dumps() for step in self.ts]]
    def __call__(self, src):
        """Shuffle the augmenter list in place, then apply each one to `src`."""
        random.shuffle(self.ts)
        for step in self.ts:
            src = step(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Scale pixel intensities by a random factor in [1-b, 1+b].

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """
    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness
    def __call__(self, src):
        """Multiply `src` in place by the sampled brightness factor."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Blend the image with its mean luminance by a random contrast factor.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """
    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # BT.601 luma weights for an RGB pixel.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])
    def __call__(self, src):
        """Scale `src` in place and add back a constant mean-luminance term."""
        scale = 1.0 + random.uniform(-self.contrast, self.contrast)
        luma = src * self.coef
        # Mean luminance weighted by (1 - scale); gray.size counts all three
        # channels, hence the factor of 3 to recover the per-pixel mean.
        offset = (3.0 * (1.0 - scale) / luma.size) * nd.sum(luma)
        src *= scale
        src += offset
        return src
class SaturationJitterAug(Augmenter):
    """Blend each pixel with its own grayscale value by a random factor.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # BT.601 luma weights for an RGB pixel.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])
    def __call__(self, src):
        """Mix `src` in place with its per-pixel grayscale projection."""
        scale = 1.0 + random.uniform(-self.saturation, self.saturation)
        luma = src * self.coef
        luma = nd.sum(luma, axis=2, keepdims=True)
        luma *= (1.0 - scale)
        src *= scale
        src += luma
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.

    Rotates the chroma plane of the image in YIQ space by a random angle,
    using the approximate linear transformation described in:
    https://beesbuzz.biz/code/hsv_color_transforms.php

    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ and YIQ -> RGB conversion matrices.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])
    def __call__(self, src):
        """Augmenter body.

        BUGFIX: this method was missing entirely, so calling the augmenter
        raised NotImplementedError from the Augmenter base class.
        Composes RGB->YIQ, a rotation of the IQ (chroma) plane by a random
        angle in [-hue*pi, hue*pi], and YIQ->RGB into a single 3x3 matrix,
        then applies it to every pixel with one matrix product.
        """
        alpha = random.uniform(-self.hue, self.hue)
        u = np.cos(alpha * np.pi)
        w = np.sin(alpha * np.pi)
        # Rotation about the luma (Y) axis in YIQ space.
        bt = np.array([[1.0, 0.0, 0.0],
                       [0.0, u, -w],
                       [0.0, w, u]])
        # Transposed so it can right-multiply HWC pixel rows.
        t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
        src = nd.dot(src, nd.array(t))
        return src
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """
    def __init__(self, brightness, contrast, saturation):
        # Only instantiate the jitters whose strength is positive; order here
        # matters only for construction — application order is randomized.
        candidates = [(brightness, BrightnessJitterAug),
                      (contrast, ContrastJitterAug),
                      (saturation, SaturationJitterAug)]
        ts = [aug_cls(level) for level, aug_cls in candidates if level > 0]
        super(ColorJitterAug, self).__init__(ts)
class LightingAug(Augmenter):
    """Add PCA-based (AlexNet-style) lighting noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """
    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec
    def __call__(self, src):
        """Add an RGB offset along the PCA eigenvectors, in place."""
        coeffs = np.random.normal(0, self.alphastd, size=(3,))
        # Per-channel shift: eigvec scaled by the sampled coefficients,
        # projected onto the eigenvalues.
        rgb_shift = np.dot(self.eigvec * coeffs, self.eigval)
        src += nd.array(rgb_shift)
        return src
class ColorNormalizeAug(Augmenter):
    """Augmenter performing mean/std color normalization.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """
    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        self.mean = self._as_ndarray(mean)
        self.std = self._as_ndarray(std)
    @staticmethod
    def _as_ndarray(value):
        # None and NDArray pass through; lists/numpy arrays get wrapped.
        if value is None or isinstance(value, nd.NDArray):
            return value
        return nd.array(value)
    def __call__(self, src):
        """Normalize `src` with the stored mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """With probability `p`, replace the image by its 3-channel grayscale.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """
    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Each output channel receives the same luma-weighted mix of RGB,
        # so the result stays 3-channel.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])
    def __call__(self, src):
        """Augmenter body: single RNG draw decides whether to desaturate."""
        if random.random() >= self.p:
            return src
        return nd.dot(src, self.mat)
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability of flipping the image left-right.
    """
    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """Flip src along the width axis with probability p."""
        if random.random() >= self.p:
            return src
        return nd.flip(src, axis=1)
class CastAug(Augmenter):
    """Cast the image to a target dtype (float32 by default).

    Parameters
    ----------
    typ : str
        Name of the target dtype.
    """
    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return src converted to the configured dtype."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Create a list of image augmenters, applied in order.

    Parameters
    ----------
    data_shape : tuple of int
        Output shape as (channels, height, width).
    resize : int
        If greater than 0, resize the shorter edge to this length first.
    rand_crop : bool
        Enable random cropping instead of center cropping.
    rand_resize : bool
        Enable random-sized cropping; requires ``rand_crop`` to be enabled.
    rand_mirror : bool
        Apply a horizontal flip with probability 0.5.
    mean : np.ndarray, True or None
        Per-channel mean to subtract; True selects the common ImageNet values.
    std : np.ndarray, True or None
        Per-channel std to divide by; True selects the common ImageNet values.
    brightness, contrast, saturation, hue : float
        Color-jitter ranges as fractions; 0 disables the corresponding jitter.
    pca_noise : float
        PCA lighting-noise level; 0 disables it.
    rand_gray : float
        [0, 1], probability to convert to grayscale (channel count unchanged).
    inter_method : int, default=2 (Area-based)
        Interpolation method for all resizing operations; see
        ``_get_interp_method`` for the meaning of each value (10 picks randomly).

    Returns
    -------
    list of Augmenter
        Augmenters in the order they should be applied.

    Examples
    --------
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> for aug in augs:
    ...    aug.dumps()
    """
    augmenters = []
    # 1. Geometry: optional resize, then exactly one crop strategy.
    if resize > 0:
        augmenters.append(ResizeAug(resize, inter_method))
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        assert rand_crop
        augmenters.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        augmenters.append(RandomCropAug(crop_size, inter_method))
    else:
        augmenters.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        augmenters.append(HorizontalFlipAug(0.5))
    # 2. Cast to float before arithmetic color transforms.
    augmenters.append(CastAug())
    # 3. Color transforms.
    if brightness or contrast or saturation:
        augmenters.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        augmenters.append(HueJitterAug(hue))
    if pca_noise > 0:
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        augmenters.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        augmenters.append(RandomGrayAug(rand_gray))
    # 4. Normalization comes last. `True` selects ImageNet statistics.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        augmenters.append(ColorNormalizeAug(mean, std))
    return augmenters
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.
    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.
    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the begining
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """
    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # Exactly one image source is required: .rec file, .lst file, or in-memory list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        # Only reported in the log below; decoding threads are configured via the env var.
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                # Indexed record file: random access, needed for shuffling/partitioning.
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')  # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                for line in iter(fin.readline, ''):
                    # .lst format: index <tab> label(s) <tab> relative path
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index)  # pylint: disable=redefined-variable-type
                index += 1
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root
        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        if num_parts > 1:
            # Keep only this worker's contiguous slice of the sequence.
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache used by 'roll_over' to carry a partial batch into the next epoch.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # Under 'roll_over' with cached leftovers, keep the current position so
        # the cached partial batch is completed before rewinding.
        if self.last_batch_handle != 'roll_over' or \
                self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
        if self._allow_read is False:
            self._allow_read = True

    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        # Drop any partial batch cached by 'roll_over'.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None

    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                # End of epoch: rewind unless the remainder should be discarded.
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential .rec reading (no index available).
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img

    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Skip undecodable/broken images instead of failing the batch.
                    logging.debug('Invalid image, skipping: %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # Number of slots actually filled; caller derives padding from this.
        return i

    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                    self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad' (or completing a rolled-over batch): fill remaining slots
                # with samples from the start of the data.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)

    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')

    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')

    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            # self.cur was already advanced past this sample, hence the -1.
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img

    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img

    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data

    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # Convert from HWC (decode layout) to CHW (network layout).
        return nd.transpose(datum, axes=(2, 0, 1))
# NOTE(review): stray '|' character removed here — non-Python residue, not valid syntax.
def __call__(self, src):
    """Apply a random hue rotation to src.

    Uses the approximate linear YIQ-space rotation described in:
    https://beesbuzz.biz/code/hsv_color_transforms.php
    """
    angle = random.uniform(-self.hue, self.hue)
    cos_a = np.cos(angle * np.pi)
    sin_a = np.sin(angle * np.pi)
    # Rotation about the luma axis in YIQ space.
    rot = np.array([[1.0, 0.0, 0.0],
                    [0.0, cos_a, -sin_a],
                    [0.0, sin_a, cos_a]])
    # RGB -> YIQ, rotate, YIQ -> RGB, folded into a single matrix.
    mat = np.dot(np.dot(self.ityiq, rot), self.tyiq).T
    return nd.dot(src, nd.array(mat))
| 765 | 778 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
    """Read and decode an image file into an NDArray.

    Delegates to the OpenCV-backed C++ operator, so MXNet must have been
    built with USE_OPENCV=1 (the CV2 Python package is not used).

    Parameters
    ----------
    filename : str
        Name of the image file to be loaded.
    flag : {0, 1}, default 1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : bool, default True
        True for RGB channel order (MXNet default), False for OpenCV's BGR.
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the decoded image.

    Example
    -------
    >>> mx.img.imread("flower.jpg")
    <NDArray 224x224x3 @cpu(0)>
    """
    return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
    """Decode an in-memory encoded image into an NDArray.

    Delegates to the OpenCV-backed C++ operator, so MXNet must have been
    built with USE_OPENCV=1 (the CV2 Python package is not used).

    Parameters
    ----------
    buf : str/bytes, numpy.ndarray or NDArray
        Raw, encoded image bytes.
    flag : int, optional, default=1
        1 for three channel color output. 0 for grayscale output.
    to_rgb : int, optional, default=1
        1 for RGB channel order (MXNet default), 0 for OpenCV's BGR.
    out : NDArray, optional
        Output buffer. Use `None` for automatic allocation.

    Returns
    -------
    NDArray
        An `NDArray` containing the decoded image.

    Example
    -------
    >>> with open("flower.jpg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> mx.img.imdecode(str_image)
    <NDArray 224x224x3 @cpu(0)>
    """
    # The backend op expects a uint8 NDArray; wrap raw bytes without copying.
    if isinstance(buf, nd.NDArray):
        return _internal._cvimdecode(buf, *args, **kwargs)
    buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
    return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
    """Clamp a crop size so it fits inside the image, preserving aspect ratio.

    If the requested crop is wider/taller than the image, the crop is shrunk
    (keeping its aspect ratio) until it fits.

    Parameters
    ----------
    src_size : tuple of int
        Size of the image in (width, height) format.
    size : tuple of int
        Size of the crop in (width, height) format.

    Returns
    -------
    tuple of int
        The clamped crop size in (width, height) format.

    Example
    --------
    >>> scale_down((640, 480), (720, 120))
    (640, 106)
    """
    crop_w, crop_h = size
    src_w, src_h = src_size
    # Shrink to fit the height first, then the width, keeping the aspect ratio.
    if src_h < crop_h:
        crop_w, crop_h = float(crop_w * src_h) / crop_h, src_h
    if src_w < crop_w:
        crop_w, crop_h = src_w, float(crop_h * src_w) / crop_w
    return int(crop_w), int(crop_h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
and a auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Random select from interpolation method metioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
    """Resize the image so its shorter edge equals `size`, keeping aspect ratio.

    The longer edge is scaled proportionally. Resizing is performed by the
    OpenCV backend, so MXNet must have been built with USE_OPENCV=1.

    Parameters
    ----------
    src : NDArray
        The original image in HWC layout.
    size : int
        The target length for the shorter edge.
    interp : int, optional, default=2
        Interpolation method; see `_get_interp_method` for the accepted codes
        (9 auto-selects, 10 picks randomly).

    Returns
    -------
    NDArray
        An 'NDArray' containing the resized image.

    Example
    -------
    >>> with open("flower.jpeg", 'rb') as fp:
    ...     str_image = fp.read()
    ...
    >>> image = mx.img.imdecode(str_image)
    >>> new_image = mx.img.resize_short(image, 640)
    """
    h, w, _ = src.shape
    # Scale the longer edge by the same factor as the shorter one.
    new_h, new_w = (size * h // w, size) if h > w else (size, size * w // h)
    return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
    """Crop src at a fixed location and optionally resize the patch.

    Parameters
    ----------
    src : NDArray
        Input image in HWC layout.
    x0 : int
        Left boundary of the cropping area.
    y0 : int
        Top boundary of the cropping area.
    w : int
        Width of the cropping area.
    h : int
        Height of the cropping area.
    size : tuple of (w, h), optional
        If given and different from (w, h), resize the patch to this size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        An `NDArray` containing the cropped (and possibly resized) image.
    """
    patch = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
    if size is None or (w, h) == size:
        return patch
    return imresize(patch, *size, interp=_get_interp_method(interp, (h, w, size[1], size[0])))
def random_crop(src, size, interp=2):
    """Randomly crop `src` to `size` (width, height).

    If `src` is smaller than `size`, the crop area is first clamped to fit
    (see scale_down) and the patch is then upsampled to `size`.

    Parameters
    ----------
    src : NDArray
        Source image.
    size : tuple of (int, int)
        Target crop size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Pick a random top-left corner such that the crop stays inside the image.
    left = random.randint(0, w - crop_w)
    top = random.randint(0, h - crop_h)
    return fixed_crop(src, left, top, crop_w, crop_h, size, interp), (left, top, crop_w, crop_h)
def center_crop(src, size, interp=2):
    """Crop `src` to `size`, trimming equally on all sides to keep the center.

    If `src` is smaller than `size`, the crop area is first clamped to fit
    (see scale_down) and the patch is then upsampled to `size`.

    .. note:: This requires MXNet to be compiled with USE_OPENCV.

    Parameters
    ----------
    src : NDArray
        Binary source image data.
    size : list or tuple of int
        The desired output image size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    """
    h, w, _ = src.shape
    crop_w, crop_h = scale_down((w, h), size)
    # Center the crop; margins are non-negative because scale_down clamps.
    left = (w - crop_w) // 2
    top = (h - crop_h) // 2
    return fixed_crop(src, left, top, crop_w, crop_h, size, interp), (left, top, crop_w, crop_h)
def color_normalize(src, mean, std=None):
    """Normalize src in place: subtract mean, then divide by std.

    Parameters
    ----------
    src : NDArray
        Input image.
    mean : NDArray or None
        RGB mean to be subtracted; None skips the subtraction.
    std : NDArray or None
        RGB standard deviation to be divided; None skips the division.

    Returns
    -------
    NDArray
        The (in-place) normalized image.
    """
    if mean is not None:
        src -= mean
    if std is None:
        return src
    src /= std
    return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
    """Randomly crop src with randomized area and aspect ratio, resize to `size`.

    Parameters
    ----------
    src : NDArray
        Input image.
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum and maximum fraction of the source area to keep.
        If float, the minimum fraction; the maximum defaults to 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.

    Returns
    -------
    NDArray
        The cropped image.
    Tuple
        (x, y, width, height): top-left position and dimensions of the crop
        within the original image.
    """
    h, w, _ = src.shape
    src_area = h * w
    if 'min_area' in kwargs:
        warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                      DeprecationWarning)
        area = kwargs.pop('min_area')
    assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
    if isinstance(area, numeric_types):
        area = (area, 1.0)
    # Try up to 10 random (area, aspect) proposals; keep the first that fits.
    for _ in range(10):
        crop_area = random.uniform(area[0], area[1]) * src_area
        aspect = random.uniform(*ratio)
        crop_w = int(round(np.sqrt(crop_area * aspect)))
        crop_h = int(round(np.sqrt(crop_area / aspect)))
        if random.random() < 0.5:
            crop_h, crop_w = crop_w, crop_h
        if crop_w <= w and crop_h <= h:
            left = random.randint(0, w - crop_w)
            top = random.randint(0, h - crop_h)
            return fixed_crop(src, left, top, crop_w, crop_h, size, interp), \
                   (left, top, crop_w, crop_h)
    # No proposal fit inside the image: fall back to a center crop.
    return center_crop(src, size, interp)
class Augmenter(object):
    """Base class for image augmenters.

    Records constructor arguments so each augmenter can be serialized
    via dumps().
    """
    def __init__(self, **kwargs):
        self._kwargs = kwargs
        # Make every recorded argument JSON-serializable for dumps().
        for name in self._kwargs:
            val = self._kwargs[name]
            if isinstance(val, nd.NDArray):
                val = val.asnumpy()
            if isinstance(val, np.ndarray):
                val = val.tolist()
            self._kwargs[name] = val

    def dumps(self):
        """Serialize this augmenter (class name plus constructor args).

        Returns
        -------
        str
            JSON formatted string that describes the Augmenter.
        """
        return json.dumps([self.__class__.__name__.lower(), self._kwargs])

    def __call__(self, src):
        """Abstract implementation body"""
        raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
    """Compose augmenters applied in fixed, sequential order.

    Parameters
    ----------
    ts : list of augmenters
        Augmenters applied front-to-back.
    """
    def __init__(self, ts):
        super(SequentialAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Run every child augmenter on src in order."""
        for child in self.ts:
            src = child(src)
        return src
class ResizeAug(Augmenter):
    """Resize the shorter edge to `size`.

    Parameters
    ----------
    size : int
        Target length of the shorter edge.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return src with its shorter edge resized to self.size."""
        return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
    """Resize to an exact size, ignoring aspect ratio.

    Parameters
    ----------
    size : tuple of (int, int)
        The desired size as (width, height).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(ForceResizeAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return src resized to exactly self.size (width, height)."""
        old_h, old_w = src.shape[0], src.shape[1]
        method = _get_interp_method(self.interp, (old_h, old_w, self.size[1], self.size[0]))
        return imresize(src, *self.size, interp=method)
class RandomCropAug(Augmenter):
    """Random crop augmenter.

    Parameters
    ----------
    size : int
        The target crop size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(RandomCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return a random crop of src (the crop rectangle is discarded)."""
        cropped, _ = random_crop(src, self.size, self.interp)
        return cropped
class RandomSizedCropAug(Augmenter):
    """Random crop with randomized area and aspect-ratio jitter.

    Parameters
    ----------
    size : tuple of (int, int)
        Size of the crop formatted as (width, height).
    area : float in (0, 1] or tuple of (float, float)
        If tuple, minimum and maximum fraction of the source area to keep.
        If float, the minimum fraction; the maximum defaults to 1.0.
    ratio : tuple of (float, float)
        Aspect ratio range as (min_aspect_ratio, max_aspect_ratio).
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, area, ratio, interp=2, **kwargs):
        super(RandomSizedCropAug, self).__init__(size=size, area=area,
                                                 ratio=ratio, interp=interp)
        self.size = size
        # Accept the legacy `min_area` keyword, with a deprecation warning.
        if 'min_area' in kwargs:
            warnings.warn('`min_area` is deprecated. Please use `area` instead.',
                          DeprecationWarning)
            area = kwargs.pop('min_area')
        self.area = area
        self.ratio = ratio
        self.interp = interp
        assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."

    def __call__(self, src):
        """Return a random-sized crop of src (the crop rectangle is discarded)."""
        cropped, _ = random_size_crop(src, self.size, self.area, self.ratio, self.interp)
        return cropped
class CenterCropAug(Augmenter):
    """Center crop augmenter.

    Parameters
    ----------
    size : list or tuple of int
        The desired output image size.
    interp : int, optional, default=2
        Interpolation method. See resize_short for details.
    """
    def __init__(self, size, interp=2):
        super(CenterCropAug, self).__init__(size=size, interp=interp)
        self.size = size
        self.interp = interp

    def __call__(self, src):
        """Return the center crop of src (the crop rectangle is discarded)."""
        cropped, _ = center_crop(src, self.size, self.interp)
        return cropped
class RandomOrderAug(Augmenter):
    """Apply a list of augmenters in random order.

    Parameters
    ----------
    ts : list of augmenters
        A series of augmenters to be applied in random order.
    """

    def __init__(self, ts):
        super(RandomOrderAug, self).__init__()
        self.ts = ts

    def dumps(self):
        """Override the default to avoid duplicate dump."""
        return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]

    def __call__(self, src):
        """Apply every augmenter in ``self.ts`` in a freshly shuffled order.

        Shuffles a copy of ``self.ts`` instead of the list itself: the
        original code shuffled in place, permanently reordering the stored
        augmenter list on every call and making ``dumps()`` output order
        nondeterministic across calls.
        """
        order = list(self.ts)
        random.shuffle(order)
        for t in order:
            src = t(src)
        return src
class BrightnessJitterAug(Augmenter):
    """Random brightness jitter augmentation.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    """

    def __init__(self, brightness):
        super(BrightnessJitterAug, self).__init__(brightness=brightness)
        self.brightness = brightness

    def __call__(self, src):
        """Scale pixel values in-place by a random factor around 1.0."""
        scale = 1.0 + random.uniform(-self.brightness, self.brightness)
        src *= scale
        return src
class ContrastJitterAug(Augmenter):
    """Random contrast jitter augmentation.

    Parameters
    ----------
    contrast : float
        The contrast jitter ratio range, [0, 1]
    """

    def __init__(self, contrast):
        super(ContrastJitterAug, self).__init__(contrast=contrast)
        self.contrast = contrast
        # Per-channel weights used to compute a luminance value.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Scale contrast by a random factor while compensating with the
        mean gray level so overall intensity is preserved."""
        factor = 1.0 + random.uniform(-self.contrast, self.contrast)
        luma = src * self.coef
        mean_gray = (3.0 * (1.0 - factor) / luma.size) * nd.sum(luma)
        src *= factor
        src += mean_gray
        return src
class SaturationJitterAug(Augmenter):
    """Random saturation jitter augmentation.

    Parameters
    ----------
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """

    def __init__(self, saturation):
        super(SaturationJitterAug, self).__init__(saturation=saturation)
        self.saturation = saturation
        # Per-channel weights used to compute a per-pixel luminance value.
        self.coef = nd.array([[[0.299, 0.587, 0.114]]])

    def __call__(self, src):
        """Blend between the image and its per-pixel grayscale version."""
        factor = 1.0 + random.uniform(-self.saturation, self.saturation)
        luma = src * self.coef
        luma = nd.sum(luma, axis=2, keepdims=True)
        luma *= (1.0 - factor)
        src *= factor
        src += luma
        return src
class HueJitterAug(Augmenter):
    """Random hue jitter augmentation.
    Parameters
    ----------
    hue : float
        The hue jitter ratio range, [0, 1]
    """
    def __init__(self, hue):
        super(HueJitterAug, self).__init__(hue=hue)
        self.hue = hue
        # RGB -> YIQ transform matrix.
        self.tyiq = np.array([[0.299, 0.587, 0.114],
                              [0.596, -0.274, -0.321],
                              [0.211, -0.523, 0.311]])
        # YIQ -> RGB (inverse) transform matrix.
        self.ityiq = np.array([[1.0, 0.956, 0.621],
                               [1.0, -0.272, -0.647],
                               [1.0, -1.107, 1.705]])
    def __call__(self, src):
        """Augmenter body.
        Using approximate linear transfomation described in:
        https://beesbuzz.biz/code/hsv_color_transforms.php
        """
        alpha = random.uniform(-self.hue, self.hue)
        u = np.cos(alpha * np.pi)
        w = np.sin(alpha * np.pi)
        # Rotation about the luma (Y) axis in YIQ space by alpha*pi radians.
        bt = np.array([[1.0, 0.0, 0.0],
                       [0.0, u, -w],
                       [0.0, w, u]])
        # Combined transform: RGB -> YIQ, rotate hue, YIQ -> RGB; transposed
        # so it can be applied as a right-multiplication via nd.dot.
        t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
        src = nd.dot(src, nd.array(t))
        return src
class ColorJitterAug(RandomOrderAug):
    """Apply random brightness, contrast and saturation jitter in random order.

    Parameters
    ----------
    brightness : float
        The brightness jitter ratio range, [0, 1]
    contrast : float
        The contrast jitter ratio range, [0, 1]
    saturation : float
        The saturation jitter ratio range, [0, 1]
    """

    def __init__(self, brightness, contrast, saturation):
        # Only enable the jitters whose range is strictly positive.
        augs = []
        if brightness > 0:
            augs.append(BrightnessJitterAug(brightness))
        if contrast > 0:
            augs.append(ContrastJitterAug(contrast))
        if saturation > 0:
            augs.append(SaturationJitterAug(saturation))
        super(ColorJitterAug, self).__init__(augs)
class LightingAug(Augmenter):
    """Add PCA based noise.

    Parameters
    ----------
    alphastd : float
        Noise level
    eigval : 3x1 np.array
        Eigen values
    eigvec : 3x3 np.array
        Eigen vectors
    """

    def __init__(self, alphastd, eigval, eigvec):
        super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
        self.alphastd = alphastd
        self.eigval = eigval
        self.eigvec = eigvec

    def __call__(self, src):
        """Add a random per-channel offset along the PCA directions."""
        coeffs = np.random.normal(0, self.alphastd, size=(3,))
        offset = np.dot(self.eigvec * coeffs, self.eigval)
        src += nd.array(offset)
        return src
class ColorNormalizeAug(Augmenter):
    """Mean and std normalization.

    Parameters
    ----------
    mean : NDArray
        RGB mean to be subtracted
    std : NDArray
        RGB standard deviation to be divided
    """

    def __init__(self, mean, std):
        super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
        # Convert raw sequences to NDArray once; keep None / NDArray as-is.
        if mean is not None and not isinstance(mean, nd.NDArray):
            mean = nd.array(mean)
        if std is not None and not isinstance(std, nd.NDArray):
            std = nd.array(std)
        self.mean = mean
        self.std = std

    def __call__(self, src):
        """Normalize ``src`` with the configured mean and std."""
        return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
    """Randomly convert to gray image.

    Parameters
    ----------
    p : float
        Probability to convert to grayscale
    """

    def __init__(self, p):
        super(RandomGrayAug, self).__init__(p=p)
        self.p = p
        # Each output channel gets the same weighted combination of R/G/B,
        # so the result keeps 3 channels but all channels are equal.
        self.mat = nd.array([[0.21, 0.21, 0.21],
                             [0.72, 0.72, 0.72],
                             [0.07, 0.07, 0.07]])

    def __call__(self, src):
        """Convert ``src`` to gray with probability ``p``; else pass through."""
        if random.random() >= self.p:
            return src
        return nd.dot(src, self.mat)
class HorizontalFlipAug(Augmenter):
    """Random horizontal flip.

    Parameters
    ----------
    p : float
        Probability to flip image horizontally
    """

    def __init__(self, p):
        super(HorizontalFlipAug, self).__init__(p=p)
        self.p = p

    def __call__(self, src):
        """Flip ``src`` left-right with probability ``p``."""
        if random.random() >= self.p:
            return src
        return nd.flip(src, axis=1)
class CastAug(Augmenter):
    """Cast the input array to a given dtype (float32 by default)."""

    def __init__(self, typ='float32'):
        super(CastAug, self).__init__(type=typ)
        self.typ = typ

    def __call__(self, src):
        """Return ``src`` converted to ``self.typ``."""
        return src.astype(self.typ)
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
                    mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
                    pca_noise=0, rand_gray=0, inter_method=2):
    """Creates an augmenter list.
    Parameters
    ----------
    data_shape : tuple of int
        Shape for output data
    resize : int
        Resize shorter edge if larger than 0 at the begining
    rand_crop : bool
        Whether to enable random cropping other than center crop
    rand_resize : bool
        Whether to enable random sized cropping, require rand_crop to be enabled
    rand_gray : float
        [0, 1], probability to convert to grayscale for all channels, the number
        of channels will not be reduced to 1
    rand_mirror : bool
        Whether to apply horizontal flip to image with probability 0.5
    mean : np.ndarray or None
        Mean pixel values for [r, g, b]
    std : np.ndarray or None
        Standard deviations for [r, g, b]
    brightness : float
        Brightness jittering range (percent)
    contrast : float
        Contrast jittering range (percent)
    saturation : float
        Saturation jittering range (percent)
    hue : float
        Hue jittering range (percent)
    pca_noise : float
        Pca noise level (percent)
    inter_method : int, default=2(Area-based)
        Interpolation method for all resizing operations
        Possible values:
        0: Nearest Neighbors Interpolation.
        1: Bilinear interpolation.
        2: Area-based (resampling using pixel area relation). It may be a
        preferred method for image decimation, as it gives moire-free
        results. But when the image is zoomed, it is similar to the Nearest
        Neighbors method. (used by default).
        3: Bicubic interpolation over 4x4 pixel neighborhood.
        4: Lanczos interpolation over 8x8 pixel neighborhood.
        9: Cubic for enlarge, area for shrink, bilinear for others
        10: Random select from interpolation method metioned above.
        Note:
        When shrinking an image, it will generally look best with AREA-based
        interpolation, whereas, when enlarging an image, it will generally look best
        with Bicubic (slow) or Bilinear (faster but still looks OK).
    Examples
    --------
    >>> # An example of creating multiple augmenters
    >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
    ...    mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
    ...    saturation=0.125, pca_noise=0.05, inter_method=10)
    >>> # dump the details
    >>> for aug in augs:
    ...    aug.dumps()
    """
    auglist = []
    # Optional shorter-edge resize happens before any cropping.
    if resize > 0:
        auglist.append(ResizeAug(resize, inter_method))
    # data_shape is (C, H, W); the crop augmenters take (width, height).
    crop_size = (data_shape[2], data_shape[1])
    if rand_resize:
        # Random-sized cropping requires random cropping to be enabled too.
        assert rand_crop
        auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
    elif rand_crop:
        auglist.append(RandomCropAug(crop_size, inter_method))
    else:
        auglist.append(CenterCropAug(crop_size, inter_method))
    if rand_mirror:
        auglist.append(HorizontalFlipAug(0.5))
    # Cast to float32 before the color/noise augmenters that do arithmetic.
    auglist.append(CastAug())
    if brightness or contrast or saturation:
        auglist.append(ColorJitterAug(brightness, contrast, saturation))
    if hue:
        auglist.append(HueJitterAug(hue))
    if pca_noise > 0:
        # PCA eigenvalues/eigenvectors for lighting noise (presumably derived
        # from ImageNet RGB statistics — not computed here; TODO confirm).
        eigval = np.array([55.46, 4.794, 1.148])
        eigvec = np.array([[-0.5675, 0.7192, 0.4009],
                           [-0.5808, -0.0045, -0.8140],
                           [-0.5836, -0.6948, 0.4203]])
        auglist.append(LightingAug(pca_noise, eigval, eigvec))
    if rand_gray > 0:
        auglist.append(RandomGrayAug(rand_gray))
    # mean/std may be passed as True to select the built-in defaults below.
    if mean is True:
        mean = nd.array([123.68, 116.28, 103.53])
    elif mean is not None:
        assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
    if std is True:
        std = nd.array([58.395, 57.12, 57.375])
    elif std is not None:
        assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
    if mean is not None or std is not None:
        auglist.append(ColorNormalizeAug(mean, std))
    return auglist
class ImageIter(io.DataIter):
    """Image data iterator with a large number of augmentation choices.
    This iterator supports reading from both .rec files and raw image files.
    To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
    files, use `path_imglist` and `path_root` parameters.
    To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    data_shape : tuple
        Data shape in (channels, height, width) format.
        For now, only RGB image with 3 channels is supported.
    label_width : int, optional
        Number of labels per example. The default label width is 1.
    path_imgrec : str
        Path to image record file (.rec).
        Created with tools/im2rec.py or bin/im2rec.
    path_imglist : str
        Path to image list (.lst).
        Created with tools/im2rec.py or with custom script.
        Format: Tab separated record of index, one or more labels and relative_path_from_root.
    imglist: list
        A list of images with the label(s).
        Each item is a list [imagelabel: float or list of float, imgpath].
    path_root : str
        Root folder of image files.
    path_imgidx : str
        Path to image index file. Needed for partition and shuffling when using .rec source.
    shuffle : bool
        Whether to shuffle all images at the start of each iteration or not.
        Can be slow for HDD.
    part_index : int
        Partition index.
    num_parts : int
        Total number of partitions.
    data_name : str
        Data name for provided symbols.
    label_name : str
        Label name for provided symbols.
    dtype : str
        Label data type. Default: float32. Other options: int32, int64, float64
    last_batch_handle : str, optional
        How to handle the last batch.
        This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the begining
        If 'discard', the last batch will be discarded
        If 'roll_over', the remaining elements will be rolled over to the next iteration
    kwargs : ...
        More arguments for creating augmenter. See mx.image.CreateAugmenter.
    """
    def __init__(self, batch_size, data_shape, label_width=1,
                 path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
                 shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 last_batch_handle='pad', **kwargs):
        super(ImageIter, self).__init__()
        # At least one data source is required: .rec file, .lst file, or in-memory list.
        assert path_imgrec or path_imglist or (isinstance(imglist, list))
        assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
        num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
        logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'
                     ' larger number to use more threads.')
        class_name = self.__class__.__name__
        if path_imgrec:
            logging.info('%s: loading recordio %s...',
                         class_name, path_imgrec)
            if path_imgidx:
                # An index file enables random access, required for shuffling/partitioning.
                self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
                self.imgidx = list(self.imgrec.keys)
            else:
                self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
                self.imgidx = None
        else:
            self.imgrec = None
        if path_imglist:
            logging.info('%s: loading image list %s...', class_name, path_imglist)
            with open(path_imglist) as fin:
                imglist = {}
                imgkeys = []
                # Each .lst line is: index \t label(s)... \t relative_path
                for line in iter(fin.readline, ''):
                    line = line.strip().split('\t')
                    label = nd.array(line[1:-1], dtype=dtype)
                    key = int(line[0])
                    imglist[key] = (label, line[-1])
                    imgkeys.append(key)
                self.imglist = imglist
        elif isinstance(imglist, list):
            logging.info('%s: loading image list...', class_name)
            result = {}
            imgkeys = []
            index = 1
            for img in imglist:
                key = str(index) # pylint: disable=redefined-variable-type
                index += 1
                # Items may be [label..., path] with one or many label values.
                if len(img) > 2:
                    label = nd.array(img[:-1], dtype=dtype)
                elif isinstance(img[0], numeric_types):
                    label = nd.array([img[0]], dtype=dtype)
                else:
                    label = nd.array(img[0], dtype=dtype)
                result[key] = (label, img[-1])
                imgkeys.append(str(key))
            self.imglist = result
        else:
            self.imglist = None
        self.path_root = path_root
        self.check_data_shape(data_shape)
        self.provide_data = [(data_name, (batch_size,) + data_shape)]
        if label_width > 1:
            self.provide_label = [(label_name, (batch_size, label_width))]
        else:
            self.provide_label = [(label_name, (batch_size,))]
        self.batch_size = batch_size
        self.data_shape = data_shape
        self.label_width = label_width
        self.shuffle = shuffle
        # self.seq holds the iteration order (keys/indices); None means
        # sequential streaming directly from the record file.
        if self.imgrec is None:
            self.seq = imgkeys
        elif shuffle or num_parts > 1:
            assert self.imgidx is not None
            self.seq = self.imgidx
        else:
            self.seq = None
        if num_parts > 1:
            # Keep only this worker's contiguous slice of the sequence.
            assert part_index < num_parts
            N = len(self.seq)
            C = N // num_parts
            self.seq = self.seq[part_index * C:(part_index + 1) * C]
        if aug_list is None:
            self.auglist = CreateAugmenter(data_shape, **kwargs)
        else:
            self.auglist = aug_list
        self.cur = 0
        self._allow_read = True
        self.last_batch_handle = last_batch_handle
        self.num_image = len(self.seq) if self.seq is not None else None
        # Cache used by 'roll_over' to carry a partial batch into the next epoch.
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
        self.reset()
    def reset(self):
        """Resets the iterator to the beginning of the data."""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        # With 'roll_over' and cached data pending, keep the position so the
        # cached partial batch is completed first.
        if self.last_batch_handle != 'roll_over' or \
            self._cache_data is None:
            if self.imgrec is not None:
                self.imgrec.reset()
            self.cur = 0
            if self._allow_read is False:
                self._allow_read = True
    def hard_reset(self):
        """Resets the iterator and ignore roll over data"""
        if self.seq is not None and self.shuffle:
            random.shuffle(self.seq)
        if self.imgrec is not None:
            self.imgrec.reset()
        self.cur = 0
        self._allow_read = True
        self._cache_data = None
        self._cache_label = None
        self._cache_idx = None
    def next_sample(self):
        """Helper function for reading in next sample."""
        if self._allow_read is False:
            raise StopIteration
        if self.seq is not None:
            if self.cur < self.num_image:
                idx = self.seq[self.cur]
            else:
                if self.last_batch_handle != 'discard':
                    self.cur = 0
                raise StopIteration
            self.cur += 1
            if self.imgrec is not None:
                s = self.imgrec.read_idx(idx)
                header, img = recordio.unpack(s)
                if self.imglist is None:
                    return header.label, img
                else:
                    return self.imglist[idx][0], img
            else:
                label, fname = self.imglist[idx]
                return label, self.read_image(fname)
        else:
            # Sequential streaming directly from the record file.
            s = self.imgrec.read()
            if s is None:
                if self.last_batch_handle != 'discard':
                    self.imgrec.reset()
                raise StopIteration
            header, img = recordio.unpack(s)
            return header.label, img
    def _batchify(self, batch_data, batch_label, start=0):
        """Helper function for batchifying data"""
        i = start
        batch_size = self.batch_size
        try:
            while i < batch_size:
                label, s = self.next_sample()
                data = self.imdecode(s)
                try:
                    self.check_valid_image(data)
                except RuntimeError as e:
                    # Skip undecodable/corrupt images rather than aborting the batch.
                    logging.debug('Invalid image, skipping:  %s', str(e))
                    continue
                data = self.augmentation_transform(data)
                assert i < batch_size, 'Batch size must be multiples of augmenter output length'
                batch_data[i] = self.postprocess_data(data)
                batch_label[i] = label
                i += 1
        except StopIteration:
            if not i:
                raise StopIteration
        # Returns the number of samples actually filled (may be < batch_size).
        return i
    def next(self):
        """Returns the next batch of data."""
        batch_size = self.batch_size
        c, h, w = self.data_shape
        # if last batch data is rolled over
        if self._cache_data is not None:
            # check both the data and label have values
            assert self._cache_label is not None, "_cache_label didn't have values"
            assert self._cache_idx is not None, "_cache_idx didn't have values"
            batch_data = self._cache_data
            batch_label = self._cache_label
            i = self._cache_idx
            # clear the cache data
        else:
            batch_data = nd.empty((batch_size, c, h, w))
            batch_label = nd.empty(self.provide_label[0][1])
            i = self._batchify(batch_data, batch_label)
        # calculate the padding
        pad = batch_size - i
        # handle padding for the last batch
        if pad != 0:
            if self.last_batch_handle == 'discard':
                raise StopIteration
            # if the option is 'roll_over', throw StopIteration and cache the data
            elif self.last_batch_handle == 'roll_over' and \
                self._cache_data is None:
                self._cache_data = batch_data
                self._cache_label = batch_label
                self._cache_idx = i
                raise StopIteration
            else:
                # 'pad': fill the remainder by reading again from the beginning.
                _ = self._batchify(batch_data, batch_label, i)
                if self.last_batch_handle == 'pad':
                    self._allow_read = False
                else:
                    self._cache_data = None
                    self._cache_label = None
                    self._cache_idx = None
        return io.DataBatch([batch_data], [batch_label], pad=pad)
    def check_data_shape(self, data_shape):
        """Checks if the input data shape is valid"""
        if not len(data_shape) == 3:
            raise ValueError('data_shape should have length 3, with dimensions CxHxW')
        if not data_shape[0] == 3:
            raise ValueError('This iterator expects inputs to have 3 channels.')
    def check_valid_image(self, data):
        """Checks if the input data is valid"""
        if len(data[0].shape) == 0:
            raise RuntimeError('Data shape is wrong')
    def imdecode(self, s):
        """Decodes a string or byte string to an NDArray.
        See mx.img.imdecode for more details."""
        def locate():
            """Locate the image file/index if decode fails."""
            if self.seq is not None:
                idx = self.seq[(self.cur % self.num_image) - 1]
            else:
                idx = (self.cur % self.num_image) - 1
            if self.imglist is not None:
                _, fname = self.imglist[idx]
                msg = "filename: {}".format(fname)
            else:
                msg = "index: {}".format(idx)
            return "Broken image " + msg
        try:
            img = imdecode(s)
        except Exception as e:
            raise RuntimeError("{}, {}".format(locate(), e))
        return img
    def read_image(self, fname):
        """Reads an input image `fname` and returns the decoded raw bytes.
        Example usage:
        ----------
        >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
        """
        with open(os.path.join(self.path_root, fname), 'rb') as fin:
            img = fin.read()
        return img
    def augmentation_transform(self, data):
        """Transforms input data with specified augmentation."""
        for aug in self.auglist:
            data = aug(data)
        return data
    def postprocess_data(self, datum):
        """Final postprocessing step before image is loaded into the batch."""
        # Convert from (H, W, C) to the (C, H, W) layout declared in provide_data.
        return nd.transpose(datum, axes=(2, 0, 1))
|
gen_base_anchors
|
Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple feature levels.
|
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from mmdet.core.anchor.builder import ANCHOR_GENERATORS
from mmdet.core.anchor import AnchorGenerator
@ANCHOR_GENERATORS.register_module(force=True)
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
basesize_ratio_range (tuple(float)): Ratio range of anchors.
input_size (int): Size of feature map, 300 for SSD300,
512 for SSD512.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. It is always set to be False in SSD.
"""
def __init__(self,
strides,
ratios,
basesize_ratio_range,
input_size=300,
scale_major=True):
assert len(strides) == len(ratios)
assert mmcv.is_tuple_of(basesize_ratio_range, float)
self.strides = [_pair(stride) for stride in strides]
self.input_size = max(input_size) if isinstance(input_size, (list,tuple)) else input_size
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
warnings.warn(
'according to original SSD, basesize_ratio_range[0] should be either 0.15'
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1'
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
# added for proto export
self.min_sizes = min_sizes
self.max_sizes = max_sizes
# MASKED: gen_base_anchors function (lines 107-126)
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
|
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
| 107 | 126 |
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from mmdet.core.anchor.builder import ANCHOR_GENERATORS
from mmdet.core.anchor import AnchorGenerator
@ANCHOR_GENERATORS.register_module(force=True)
class SSDAnchorGenerator(AnchorGenerator):
    """Anchor generator for SSD.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        basesize_ratio_range (tuple(float)): Ratio range of anchors.
        input_size (int): Size of feature map, 300 for SSD300,
            512 for SSD512.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. It is always set to be False in SSD.
    """

    def __init__(self,
                 strides,
                 ratios,
                 basesize_ratio_range,
                 input_size=300,
                 scale_major=True):
        assert len(strides) == len(ratios)
        assert mmcv.is_tuple_of(basesize_ratio_range, float)
        self.strides = [_pair(stride) for stride in strides]
        # Allow (H, W)-style input sizes; SSD uses a single square size.
        self.input_size = max(input_size) if isinstance(input_size, (list, tuple)) else input_size
        self.centers = [(stride[0] / 2., stride[1] / 2.)
                        for stride in self.strides]
        self.basesize_ratio_range = basesize_ratio_range
        # calculate anchor ratios and sizes
        min_ratio, max_ratio = basesize_ratio_range
        min_ratio = int(min_ratio * 100)
        max_ratio = int(max_ratio * 100)
        # Evenly spread the base-size ratios over the non-special levels.
        step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
        min_sizes = []
        max_sizes = []
        for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
            min_sizes.append(int(self.input_size * ratio / 100))
            max_sizes.append(int(self.input_size * (ratio + step) / 100))
        # The first (highest-resolution) level uses special-cased sizes that
        # depend on the input size and the configured ratio range.
        if self.input_size == 300:
            if basesize_ratio_range[0] == 0.15:  # SSD300 COCO
                min_sizes.insert(0, int(self.input_size * 7 / 100))
                max_sizes.insert(0, int(self.input_size * 15 / 100))
            elif basesize_ratio_range[0] == 0.2:  # SSD300 VOC
                min_sizes.insert(0, int(self.input_size * 10 / 100))
                max_sizes.insert(0, int(self.input_size * 20 / 100))
            else:
                min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
                max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
                # NOTE: a space was missing between '0.15' and 'or' in the
                # original message, producing "0.15or 0.2".
                warnings.warn(
                    'according to original SSD, basesize_ratio_range[0] should be either 0.15 '
                    'or 0.2 when input_size is 300, got '
                    f'{basesize_ratio_range[0]}.')
        elif self.input_size == 512:
            if basesize_ratio_range[0] == 0.1:  # SSD512 COCO
                min_sizes.insert(0, int(self.input_size * 4 / 100))
                max_sizes.insert(0, int(self.input_size * 10 / 100))
            elif basesize_ratio_range[0] == 0.15:  # SSD512 VOC
                min_sizes.insert(0, int(self.input_size * 7 / 100))
                max_sizes.insert(0, int(self.input_size * 15 / 100))
            else:
                min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
                max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
                # Same missing-space fix as above ("0.1or 0.15").
                warnings.warn(
                    'according to original SSD, basesize_ratio_range[0] should be either 0.1 '
                    'or 0.15 when input_size is 512, got'
                    f' {basesize_ratio_range[0]}.')
        else:
            if basesize_ratio_range[0] == 0.1:  # SSD512 COCO
                min_sizes.insert(0, int(self.input_size * 4 / 100))
                max_sizes.insert(0, int(self.input_size * 10 / 100))
            else:
                min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
                max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
        anchor_ratios = []
        anchor_scales = []
        for k in range(len(self.strides)):
            # Two scales per level: 1 and sqrt(max/min) (the extra square anchor).
            scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
            anchor_ratio = [1.]
            for r in ratios[k]:
                anchor_ratio += [1 / r, r]  # 4 or 6 ratio
            anchor_ratios.append(torch.Tensor(anchor_ratio))
            anchor_scales.append(torch.Tensor(scales))
        self.base_sizes = min_sizes
        self.scales = anchor_scales
        self.ratios = anchor_ratios
        self.scale_major = scale_major
        self.center_offset = 0
        self.base_anchors = self.gen_base_anchors()
        # added for proto export
        self.min_sizes = min_sizes
        self.max_sizes = max_sizes

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            base_anchors = self.gen_single_level_base_anchors(
                base_size,
                scales=self.scales[i],
                ratios=self.ratios[i],
                center=self.centers[i])
            # Keep len(ratios)+1 anchors per location, moving the anchor at
            # row len(ratios) up to position 1 — i.e. rows
            # [0, K, 1, ..., K-1] with K = len(self.ratios[i]) — to match the
            # anchor ordering used by the original SSD implementation.
            indices = list(range(len(self.ratios[i])))
            indices.insert(1, len(indices))
            base_anchors = torch.index_select(base_anchors, 0,
                                              torch.LongTensor(indices))
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors

    def __repr__(self):
        """str: a string that describes the module"""
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}strides={self.strides},\n'
        # The original printed `scales=` twice; the duplicate line is removed.
        repr_str += f'{indent_str}scales={self.scales},\n'
        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
        repr_str += f'{indent_str}input_size={self.input_size},\n'
        repr_str += f'{indent_str}ratios={self.ratios},\n'
        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
        repr_str += f'{indent_str}basesize_ratio_range='
        repr_str += f'{self.basesize_ratio_range})'
        return repr_str
|
initiate_upgrade_connection
|
Initiate an upgrade connection.
This should be used if the request has already be received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
|
# -*- coding: utf-8 -*-
"""
wsproto/handshake
~~~~~~~~~~~~~~~~~~
An implementation of WebSocket handshakes.
"""
from collections import deque
from typing import Deque, Dict, Generator, List, Optional, Union
import h11
from .connection import Connection, ConnectionState, ConnectionType
from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
from .extensions import Extension
from .typing import Headers
from .utilities import (
generate_accept_token,
generate_nonce,
LocalProtocolError,
normed_header_dict,
RemoteProtocolError,
split_comma_header,
)
# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
WEBSOCKET_VERSION = b"13"
class H11Handshake:
"""A Handshake implementation for HTTP/1.1 connections."""
def __init__(self, connection_type: ConnectionType) -> None:
self.client = connection_type is ConnectionType.CLIENT
self._state = ConnectionState.CONNECTING
if self.client:
self._h11_connection = h11.Connection(h11.CLIENT)
else:
self._h11_connection = h11.Connection(h11.SERVER)
self._connection: Optional[Connection] = None
self._events: Deque[Event] = deque()
self._initiating_request: Optional[Request] = None
self._nonce: Optional[bytes] = None
    @property
    def state(self) -> ConnectionState:
        """The current state of the WebSocket handshake/connection."""
        return self._state
    @property
    def connection(self) -> Optional[Connection]:
        """Return the established connection.

        Returns ``None`` if the WebSocket handshake has not been completed
        yet. (The code does not raise here, contrary to what the original
        docstring claimed.)

        :rtype: Optional[Connection]
        """
        return self._connection
# MASKED: initiate_upgrade_connection function (lines 63-78)
def send(self, event: Event) -> bytes:
"""Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes
"""
data = b""
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError(
"Event {} cannot be sent during the handshake".format(event)
)
return data
def receive_data(self, data: bytes) -> None:
"""Receive data from the remote.
A list of events that the remote peer triggered by sending
this data can be retrieved with :meth:`events`.
:param bytes data: Data received from the WebSocket peer.
"""
self._h11_connection.receive_data(data)
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError(
"Bad HTTP message", event_hint=RejectConnection()
)
if (
isinstance(event, h11.ConnectionClosed)
or event is h11.NEED_DATA
or event is h11.PAUSED
):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if event.status_code == 101:
self._events.append(self._establish_client_connection(event))
else:
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=False,
)
)
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=True,
)
)
elif isinstance(event, h11.Data):
self._events.append(
RejectData(data=event.data, body_finished=False)
)
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b"", body_finished=True))
self._state = ConnectionState.CLOSED
else:
if isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event))
def events(self) -> Generator[Event, None, None]:
"""Return a generator that provides any events that have been generated
by protocol activity.
:returns: a generator that yields H11 events.
"""
while self._events:
yield self._events.popleft()
############ Server mode methods
def _process_connection_request(self, event: h11.Request) -> Request:
if event.method != b"GET":
raise RemoteProtocolError(
"Request method must be GET", event_hint=RejectConnection()
)
connection_tokens = None
extensions: List[str] = []
host = None
key = None
subprotocols: List[str] = []
upgrade = b""
version = None
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
elif name == b"host":
host = value.decode("ascii")
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
extensions = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-key":
key = value
elif name == b"sec-websocket-protocol":
subprotocols = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-version":
version = value
elif name == b"upgrade":
upgrade = value
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if version != WEBSOCKET_VERSION:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'",
event_hint=RejectConnection(
headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
status_code=426,
),
)
if key is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
if version is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection()
)
self._initiating_request = Request(
extensions=extensions,
extra_headers=headers,
host=host,
subprotocols=subprotocols,
target=event.target.decode("ascii"),
)
return self._initiating_request
def _accept(self, event: AcceptConnection) -> bytes:
request_headers = normed_header_dict(self._initiating_request.extra_headers)
nonce = request_headers[b"sec-websocket-key"]
accept_token = generate_accept_token(nonce)
headers = [
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Accept", accept_token),
]
if event.subprotocol is not None:
if event.subprotocol not in self._initiating_request.subprotocols:
raise LocalProtocolError(
"unexpected subprotocol {}".format(event.subprotocol)
)
headers.append(
(b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
)
if event.extensions:
accepts = server_extensions_handshake( # type: ignore
self._initiating_request.extensions, event.extensions
)
if accepts:
headers.append((b"Sec-WebSocket-Extensions", accepts))
response = h11.InformationalResponse(
status_code=101, headers=headers + event.extra_headers
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
event.extensions,
)
self._state = ConnectionState.OPEN
return self._h11_connection.send(response)
def _reject(self, event: RejectConnection) -> bytes:
if self.state != ConnectionState.CONNECTING:
raise LocalProtocolError(
"Connection cannot be rejected in state %s" % self.state
)
headers = event.headers
if not event.has_body:
headers.append((b"content-length", b"0"))
response = h11.Response(status_code=event.status_code, headers=headers)
data = self._h11_connection.send(response)
self._state = ConnectionState.REJECTING
if not event.has_body:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
def _send_reject_data(self, event: RejectData) -> bytes:
if self.state != ConnectionState.REJECTING:
raise LocalProtocolError(
"Cannot send rejection data in state {}".format(self.state)
)
data = self._h11_connection.send(h11.Data(data=event.data))
if event.body_finished:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
############ Client mode methods
def _initiate_connection(self, request: Request) -> bytes:
self._initiating_request = request
self._nonce = generate_nonce()
headers = [
(b"Host", request.host.encode("ascii")),
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Key", self._nonce),
(b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
]
if request.subprotocols:
headers.append(
(
b"Sec-WebSocket-Protocol",
(", ".join(request.subprotocols)).encode("ascii"),
)
)
if request.extensions:
offers = {e.name: e.offer() for e in request.extensions} # type: ignore
extensions = []
for name, params in offers.items():
name = name.encode("ascii")
if params is True:
extensions.append(name)
elif params:
extensions.append(
b"%s; %s" % (name, params.encode("ascii")) # type: ignore
)
if extensions:
headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
upgrade = h11.Request(
method=b"GET",
target=request.target.encode("ascii"),
headers=headers + request.extra_headers,
)
return self._h11_connection.send(upgrade)
def _establish_client_connection(
self, event: h11.InformationalResponse
) -> AcceptConnection: # noqa: MC0001
accept = None
connection_tokens = None
accepts: List[str] = []
subprotocol = None
upgrade = b""
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
accepts = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-accept":
accept = value
continue # Skip appending to headers
elif name == b"sec-websocket-protocol":
subprotocol = value
continue # Skip appending to headers
elif name == b"upgrade":
upgrade = value
continue # Skip appending to headers
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
accept_token = generate_accept_token(self._nonce)
if accept != accept_token:
raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
if subprotocol is not None:
subprotocol = subprotocol.decode("ascii")
if subprotocol not in self._initiating_request.subprotocols:
raise RemoteProtocolError(
"unrecognized subprotocol {}".format(subprotocol),
event_hint=RejectConnection(),
)
extensions = client_extensions_handshake( # type: ignore
accepts, self._initiating_request.extensions
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
extensions,
self._h11_connection.trailing_data[0],
)
self._state = ConnectionState.OPEN
return AcceptConnection(
extensions=extensions, extra_headers=headers, subprotocol=subprotocol
)
def __repr__(self) -> str:
return "{}(client={}, state={})".format(
self.__class__.__name__, self.client, self.state
)
def server_extensions_handshake(
requested: List[str], supported: List[Extension]
) -> Optional[bytes]:
"""Agree on the extensions to use returning an appropriate header value.
This returns None if there are no agreed extensions
"""
accepts: Dict[str, Union[bool, bytes]] = {}
for offer in requested:
name = offer.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
accept = extension.accept(offer)
if accept is True:
accepts[extension.name] = True
elif accept is not False and accept is not None:
accepts[extension.name] = accept.encode("ascii") # type: ignore
if accepts:
extensions: List[bytes] = []
for name, params in accepts.items():
name = name.encode("ascii") # type: ignore
if params is True:
extensions.append(name) # type: ignore
else:
if params == b"":
extensions.append(b"%s" % (name))
else:
extensions.append(b"%s; %s" % (name, params))
return b", ".join(extensions)
return None
def client_extensions_handshake(
accepted: List[str], supported: List[Extension]
) -> List[Extension]:
# This raises RemoteProtocolError is the accepted extension is not
# supported.
extensions = []
for accept in accepted:
name = accept.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
extension.finalize(accept)
extensions.append(extension)
break
else:
raise RemoteProtocolError(
"unrecognized extension {}".format(name), event_hint=RejectConnection()
)
return extensions
|
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
"""Initiate an upgrade connection.
This should be used if the request has already be received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
"""
if self.client:
raise LocalProtocolError(
"Cannot initiate an upgrade connection when acting as the client"
)
upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request))
| 63 | 78 |
# -*- coding: utf-8 -*-
"""
wsproto/handshake
~~~~~~~~~~~~~~~~~~
An implementation of WebSocket handshakes.
"""
from collections import deque
from typing import Deque, Dict, Generator, List, Optional, Union
import h11
from .connection import Connection, ConnectionState, ConnectionType
from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
from .extensions import Extension
from .typing import Headers
from .utilities import (
generate_accept_token,
generate_nonce,
LocalProtocolError,
normed_header_dict,
RemoteProtocolError,
split_comma_header,
)
# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
WEBSOCKET_VERSION = b"13"
class H11Handshake:
"""A Handshake implementation for HTTP/1.1 connections."""
def __init__(self, connection_type: ConnectionType) -> None:
self.client = connection_type is ConnectionType.CLIENT
self._state = ConnectionState.CONNECTING
if self.client:
self._h11_connection = h11.Connection(h11.CLIENT)
else:
self._h11_connection = h11.Connection(h11.SERVER)
self._connection: Optional[Connection] = None
self._events: Deque[Event] = deque()
self._initiating_request: Optional[Request] = None
self._nonce: Optional[bytes] = None
    @property
    def state(self) -> ConnectionState:
        """Return the current :class:`ConnectionState` of the handshake."""
        return self._state

    @property
    def connection(self) -> Optional[Connection]:
        """Return the established connection.

        NOTE(review): despite what the original docstring claimed, this
        does not raise; it simply returns ``None`` until the connection
        has been established.

        :rtype: Optional[Connection]
        """
        return self._connection
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
"""Initiate an upgrade connection.
This should be used if the request has already be received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
"""
if self.client:
raise LocalProtocolError(
"Cannot initiate an upgrade connection when acting as the client"
)
upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request))
def send(self, event: Event) -> bytes:
"""Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes
"""
data = b""
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError(
"Event {} cannot be sent during the handshake".format(event)
)
return data
    def receive_data(self, data: bytes) -> None:
        """Receive data from the remote.

        A list of events that the remote peer triggered by sending
        this data can be retrieved with :meth:`events`.

        :param bytes data: Data received from the WebSocket peer.
        """
        self._h11_connection.receive_data(data)
        while True:
            try:
                event = self._h11_connection.next_event()
            except h11.RemoteProtocolError:
                raise RemoteProtocolError(
                    "Bad HTTP message", event_hint=RejectConnection()
                )
            if (
                isinstance(event, h11.ConnectionClosed)
                or event is h11.NEED_DATA
                or event is h11.PAUSED
            ):
                # No complete event available (or the peer closed).
                break

            if self.client:
                if isinstance(event, h11.InformationalResponse):
                    if event.status_code == 101:
                        # 101 Switching Protocols: the handshake succeeded.
                        self._events.append(self._establish_client_connection(event))
                    else:
                        # Any other 1xx response is treated as a rejection.
                        self._events.append(
                            RejectConnection(
                                headers=event.headers,
                                status_code=event.status_code,
                                has_body=False,
                            )
                        )
                        self._state = ConnectionState.CLOSED
                elif isinstance(event, h11.Response):
                    # A full HTTP response means the upgrade was refused;
                    # its body is streamed out via RejectData events.
                    self._state = ConnectionState.REJECTING
                    self._events.append(
                        RejectConnection(
                            headers=event.headers,
                            status_code=event.status_code,
                            has_body=True,
                        )
                    )
                elif isinstance(event, h11.Data):
                    self._events.append(
                        RejectData(data=event.data, body_finished=False)
                    )
                elif isinstance(event, h11.EndOfMessage):
                    self._events.append(RejectData(data=b"", body_finished=True))
                    self._state = ConnectionState.CLOSED
            else:
                # Server side: only the opening request is relevant here.
                if isinstance(event, h11.Request):
                    self._events.append(self._process_connection_request(event))
def events(self) -> Generator[Event, None, None]:
"""Return a generator that provides any events that have been generated
by protocol activity.
:returns: a generator that yields H11 events.
"""
while self._events:
yield self._events.popleft()
############ Server mode methods
def _process_connection_request(self, event: h11.Request) -> Request:
if event.method != b"GET":
raise RemoteProtocolError(
"Request method must be GET", event_hint=RejectConnection()
)
connection_tokens = None
extensions: List[str] = []
host = None
key = None
subprotocols: List[str] = []
upgrade = b""
version = None
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
elif name == b"host":
host = value.decode("ascii")
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
extensions = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-key":
key = value
elif name == b"sec-websocket-protocol":
subprotocols = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-version":
version = value
elif name == b"upgrade":
upgrade = value
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if version != WEBSOCKET_VERSION:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'",
event_hint=RejectConnection(
headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
status_code=426,
),
)
if key is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
if version is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection()
)
self._initiating_request = Request(
extensions=extensions,
extra_headers=headers,
host=host,
subprotocols=subprotocols,
target=event.target.decode("ascii"),
)
return self._initiating_request
    def _accept(self, event: AcceptConnection) -> bytes:
        """Serialise a 101 response accepting the client's upgrade request.

        Assumes a Request has already been processed, i.e.
        ``self._initiating_request`` is set.
        """
        request_headers = normed_header_dict(self._initiating_request.extra_headers)
        # _process_connection_request guarantees the key header is present.
        nonce = request_headers[b"sec-websocket-key"]
        accept_token = generate_accept_token(nonce)
        headers = [
            (b"Upgrade", b"WebSocket"),
            (b"Connection", b"Upgrade"),
            (b"Sec-WebSocket-Accept", accept_token),
        ]
        if event.subprotocol is not None:
            # Only a subprotocol the client actually offered may be chosen.
            if event.subprotocol not in self._initiating_request.subprotocols:
                raise LocalProtocolError(
                    "unexpected subprotocol {}".format(event.subprotocol)
                )
            headers.append(
                (b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
            )
        if event.extensions:
            accepts = server_extensions_handshake(  # type: ignore
                self._initiating_request.extensions, event.extensions
            )
            if accepts:
                headers.append((b"Sec-WebSocket-Extensions", accepts))
        response = h11.InformationalResponse(
            status_code=101, headers=headers + event.extra_headers
        )
        self._connection = Connection(
            ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
            event.extensions,
        )
        self._state = ConnectionState.OPEN
        return self._h11_connection.send(response)
def _reject(self, event: RejectConnection) -> bytes:
if self.state != ConnectionState.CONNECTING:
raise LocalProtocolError(
"Connection cannot be rejected in state %s" % self.state
)
headers = event.headers
if not event.has_body:
headers.append((b"content-length", b"0"))
response = h11.Response(status_code=event.status_code, headers=headers)
data = self._h11_connection.send(response)
self._state = ConnectionState.REJECTING
if not event.has_body:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
def _send_reject_data(self, event: RejectData) -> bytes:
if self.state != ConnectionState.REJECTING:
raise LocalProtocolError(
"Cannot send rejection data in state {}".format(self.state)
)
data = self._h11_connection.send(h11.Data(data=event.data))
if event.body_finished:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
############ Client mode methods
    def _initiate_connection(self, request: Request) -> bytes:
        """Serialise the client's opening handshake (HTTP upgrade request)."""
        self._initiating_request = request
        # Random nonce; the server must derive its accept token from it.
        self._nonce = generate_nonce()
        headers = [
            (b"Host", request.host.encode("ascii")),
            (b"Upgrade", b"WebSocket"),
            (b"Connection", b"Upgrade"),
            (b"Sec-WebSocket-Key", self._nonce),
            (b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
        ]
        if request.subprotocols:
            headers.append(
                (
                    b"Sec-WebSocket-Protocol",
                    (", ".join(request.subprotocols)).encode("ascii"),
                )
            )
        if request.extensions:
            offers = {e.name: e.offer() for e in request.extensions}  # type: ignore
            extensions = []
            for name, params in offers.items():
                name = name.encode("ascii")
                if params is True:
                    # Offer with no parameters.
                    extensions.append(name)
                elif params:
                    # Offer with parameters; other falsy offers are skipped.
                    extensions.append(
                        b"%s; %s" % (name, params.encode("ascii"))  # type: ignore
                    )
            if extensions:
                headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
        upgrade = h11.Request(
            method=b"GET",
            target=request.target.encode("ascii"),
            headers=headers + request.extra_headers,
        )
        return self._h11_connection.send(upgrade)
    def _establish_client_connection(
        self, event: h11.InformationalResponse
    ) -> AcceptConnection:  # noqa: MC0001
        """Validate the server's 101 response and open the connection.

        Raises RemoteProtocolError if a required header is missing, the
        accept token does not match our nonce, or the server chose a
        subprotocol/extension that was never offered.
        """
        accept = None
        connection_tokens = None
        accepts: List[str] = []
        subprotocol = None
        upgrade = b""
        headers: Headers = []
        for name, value in event.headers:
            name = name.lower()
            if name == b"connection":
                connection_tokens = split_comma_header(value)
                continue  # Skip appending to headers
            elif name == b"sec-websocket-extensions":
                accepts = split_comma_header(value)
                continue  # Skip appending to headers
            elif name == b"sec-websocket-accept":
                accept = value
                continue  # Skip appending to headers
            elif name == b"sec-websocket-protocol":
                subprotocol = value
                continue  # Skip appending to headers
            elif name == b"upgrade":
                upgrade = value
                continue  # Skip appending to headers
            headers.append((name, value))
        if connection_tokens is None or not any(
            token.lower() == "upgrade" for token in connection_tokens
        ):
            raise RemoteProtocolError(
                "Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
            )
        if upgrade.lower() != b"websocket":
            raise RemoteProtocolError(
                "Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
            )
        # The token derived from our nonce must match the server's echo
        # (see generate_accept_token).
        accept_token = generate_accept_token(self._nonce)
        if accept != accept_token:
            raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
        if subprotocol is not None:
            subprotocol = subprotocol.decode("ascii")
            if subprotocol not in self._initiating_request.subprotocols:
                raise RemoteProtocolError(
                    "unrecognized subprotocol {}".format(subprotocol),
                    event_hint=RejectConnection(),
                )
        extensions = client_extensions_handshake(  # type: ignore
            accepts, self._initiating_request.extensions
        )
        # Any bytes already buffered beyond the handshake (trailing data)
        # are handed to the WebSocket Connection.
        self._connection = Connection(
            ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
            extensions,
            self._h11_connection.trailing_data[0],
        )
        self._state = ConnectionState.OPEN
        return AcceptConnection(
            extensions=extensions, extra_headers=headers, subprotocol=subprotocol
        )
def __repr__(self) -> str:
return "{}(client={}, state={})".format(
self.__class__.__name__, self.client, self.state
)
def server_extensions_handshake(
    requested: List[str], supported: List[Extension]
) -> Optional[bytes]:
    """Agree on the extensions to use, returning the response header value.

    Returns None when no requested extension was accepted.
    """
    accepts: Dict[str, Union[bool, bytes]] = {}
    for offer in requested:
        offer_name = offer.split(";", 1)[0].strip()
        for extension in supported:
            if extension.name != offer_name:
                continue
            verdict = extension.accept(offer)
            if verdict is True:
                accepts[extension.name] = True
            elif verdict is not False and verdict is not None:
                accepts[extension.name] = verdict.encode("ascii")  # type: ignore
    if not accepts:
        return None
    header_parts: List[bytes] = []
    for name, params in accepts.items():
        raw_name = name.encode("ascii")
        if params is True or params == b"":
            # No parameters: just the extension name.
            header_parts.append(raw_name)
        else:
            header_parts.append(b"%s; %s" % (raw_name, params))
    return b", ".join(header_parts)
def client_extensions_handshake(
    accepted: List[str], supported: List[Extension]
) -> List[Extension]:
    """Finalize the extensions the server accepted.

    Raises RemoteProtocolError if the server accepted an extension that
    is not supported (i.e. was never offered).
    """
    extensions = []
    for accept in accepted:
        accepted_name = accept.split(";", 1)[0].strip()
        matched = None
        for extension in supported:
            if extension.name == accepted_name:
                matched = extension
                break
        if matched is None:
            raise RemoteProtocolError(
                "unrecognized extension {}".format(accepted_name),
                event_hint=RejectConnection(),
            )
        matched.finalize(accept)
        extensions.append(matched)
    return extensions
|
register
|
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
|
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
    """Extract the ``switchboard.``-prefixed entries from *config*.

    Returns a new dict with the prefix stripped from the keys, e.g.
    ``{'switchboard.dburl': u}`` -> ``{'dburl': u}``.
    """
    token = 'switchboard.'
    # items() instead of iteritems() so this works on Python 2 and 3.
    # Slice off only the leading prefix: the original k.replace(token, '')
    # also mangled keys containing the token elsewhere in the name.
    return {
        k[len(token):]: v
        for k, v in config.items()
        if k.startswith(token)
    }
def configure(config=None, nested=False, cache=None):
    """Set up Switchboard: settings, cache, and the datastore connection.

    :param config: mapping of configuration values; when *nested* is
        True only keys prefixed with ``switchboard.`` are used.
    :param nested: whether *config* keys carry the ``switchboard.`` prefix.
    :param cache: optional cache instance shared with the operator.
    """
    # None default avoids the shared-mutable-default pitfall of config={}.
    if config is None:
        config = {}
    if nested:
        config = nested_config(config)
    # Re-read settings to make sure we have everything.
    Settings.init(cache=cache, **config)
    operator.cache = cache
    # Establish the connection to the database.
    timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
    dburl = settings.SWITCHBOARD_DBURL
    if dburl:
        engine = sqla.create_engine(
            dburl, connect_args={'connect_timeout': timeout})
        Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
    # Register the builtins.
    __import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
    def __init__(self, *args, **kwargs):
        # Inject args and kwargs that are known quantities; the SwitchManager
        # will always deal with the Switch model and so on.
        new_args = [Switch]
        new_args.extend(args)
        kwargs['key'] = 'key'
        kwargs['value'] = 'value'
        # Set to {} (e.g. per request) to memoise is_active() results.
        self.result_cache = None
        # Extra instances checked by every is_active() call.
        self.context = {}
        super(SwitchManager, self).__init__(*new_args, **kwargs)
    def __unicode__(self):
        # Python 2 text representation: shows the backing model and the
        # currently registered condition sets.
        return "<%s: %s (%s)>" % (self.__class__.__name__,
                                  getattr(self, 'model', ''),
                                  registry.values())
    def __getitem__(self, key):
        """
        Returns a SwitchProxy, rather than a Switch.  It allows us to
        easily extend the Switches method and automatically include our
        manager instance.

        Propagates the underlying ModelDict's lookup error for unknown
        keys (NOTE(review): presumably KeyError — confirm in ModelDict).
        """
        return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
    def with_result_cache(func):
        """
        Decorator specifically for is_active.  If self.result_cache is set
        to a {} the is_active results will be cached for each set of params.

        NOTE(review): defined inside the class body and applied as a plain
        decorator, so ``func`` is the undecorated method (Python 2 idiom).
        """
        def inner(self, *args, **kwargs):
            dic = self.result_cache
            cache_key = None
            if dic is not None:
                # Key on the exact call signature; unhashable arguments
                # simply bypass the cache instead of erroring out.
                cache_key = (args, tuple(kwargs.items()))
                try:
                    result = dic.get(cache_key)
                except TypeError as e:  # not hashable
                    log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
                              args[0], e, repr(cache_key)[:200])
                    cache_key = None
                else:
                    if result is not None:
                        return result
            result = func(self, *args, **kwargs)
            if cache_key is not None:
                dic[cache_key] = result
            return result
        return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
# MASKED: register function (lines 184-195)
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
    def get_condition_set_by_id(self, switch_id):
        """
        Given the identifier of a condition set (described in
        ConditionSet.get_id()), returns the registered instance.

        Raises KeyError if no condition set with that id is registered.
        """
        return registry[switch_id]

    def get_condition_sets(self):
        """
        Returns a generator yielding all currently registered
        ConditionSet instances.
        """
        # Python 2 idiom; registry.values() would be needed on Python 3.
        return registry.itervalues()

    def get_all_conditions(self):
        """
        Returns a generator which yields groups of lists of conditions.

        >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
        >>>     print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
        """
        cs = self.get_condition_sets()
        # Stable ordering by group label, for display purposes.
        for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
            group = unicode(condition_set.get_group_label())
            for field in condition_set.fields.itervalues():
                yield condition_set.get_id(), group, field

    def as_request(self, user=None, ip_address=None):
        """Build a MockRequest wrapping *user* and *ip_address*."""
        from .helpers import MockRequest
        return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
| 184 | 195 |
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
    """Extract the ``switchboard.``-prefixed entries from *config*.

    Returns a new dict with the prefix stripped from the keys, e.g.
    ``{'switchboard.dburl': u}`` -> ``{'dburl': u}``.
    """
    token = 'switchboard.'
    # items() instead of iteritems() so this works on Python 2 and 3.
    # Slice off only the leading prefix: the original k.replace(token, '')
    # also mangled keys containing the token elsewhere in the name.
    return {
        k[len(token):]: v
        for k, v in config.items()
        if k.startswith(token)
    }
def configure(config=None, nested=False, cache=None):
    """Set up Switchboard: settings, cache, and the datastore connection.

    :param config: mapping of configuration values; when *nested* is
        True only keys prefixed with ``switchboard.`` are used.
    :param nested: whether *config* keys carry the ``switchboard.`` prefix.
    :param cache: optional cache instance shared with the operator.
    """
    # None default avoids the shared-mutable-default pitfall of config={}.
    if config is None:
        config = {}
    if nested:
        config = nested_config(config)
    # Re-read settings to make sure we have everything.
    Settings.init(cache=cache, **config)
    operator.cache = cache
    # Establish the connection to the database.
    timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
    dburl = settings.SWITCHBOARD_DBURL
    if dburl:
        engine = sqla.create_engine(
            dburl, connect_args={'connect_timeout': timeout})
        Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
    # Register the builtins.
    __import__('switchboard.builtins')
class SwitchManager(ModelDict):
    """Dict-like manager of Switch objects keyed by switch key.

    Extends ModelDict with feature-flag semantics: is_active() evaluates
    a switch (and its ``parent:child`` ancestors) against registered
    ConditionSets to decide whether a feature is enabled.
    """

    # Re-export the status/condition constants from .models so callers
    # can write e.g. ``operator.GLOBAL`` without a second import.
    DISABLED = DISABLED
    SELECTIVE = SELECTIVE
    GLOBAL = GLOBAL
    INHERIT = INHERIT
    INCLUDE = INCLUDE
    EXCLUDE = EXCLUDE

    def __init__(self, *args, **kwargs):
        # Inject args and kwargs that are known quantities; the SwitchManager
        # will always deal with the Switch model and so on.
        new_args = [Switch]
        new_args.extend(args)
        kwargs['key'] = 'key'
        kwargs['value'] = 'value'
        # result_cache is None (disabled) until something assigns a dict;
        # see the with_result_cache decorator below.
        self.result_cache = None
        # Extra instances always appended to is_active() checks.
        self.context = {}
        super(SwitchManager, self).__init__(*new_args, **kwargs)

    def __unicode__(self):
        return "<%s: %s (%s)>" % (self.__class__.__name__,
                                  getattr(self, 'model', ''),
                                  registry.values())

    def __getitem__(self, key):
        """
        Returns a SwitchProxy, rather than a Switch. It allows us to
        easily extend the Switches method and automatically include our
        manager instance.
        """
        return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))

    def with_result_cache(func):
        """
        Decorator specifically for is_active. If self.result_cache is set to a {}
        the is_active results will be cached for each set of params.
        """
        def inner(self, *args, **kwargs):
            dic = self.result_cache
            cache_key = None
            if dic is not None:
                cache_key = (args, tuple(kwargs.items()))
                try:
                    result = dic.get(cache_key)
                except TypeError as e:  # not hashable
                    # Unhashable args (e.g. a request object) simply bypass
                    # the cache for this call rather than failing.
                    log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
                              args[0], e, repr(cache_key)[:200])
                    cache_key = None
                else:
                    if result is not None:
                        return result
            result = func(self, *args, **kwargs)
            if cache_key is not None:
                dic[cache_key] = result
            return result
        return inner

    @with_result_cache
    def is_active(self, key, *instances, **kwargs):
        """
        Returns ``True`` if any of ``instances`` match an active switch.
        Otherwise returns ``False``.
        >>> operator.is_active('my_feature', request) #doctest: +SKIP
        """
        try:
            default = kwargs.pop('default', False)

            # Check all parents for a disabled state
            parts = key.split(':')
            if len(parts) > 1:
                # default=None distinguishes "parent undefined" from an
                # explicit True/False answer.
                child_kwargs = kwargs.copy()
                child_kwargs['default'] = None
                result = self.is_active(':'.join(parts[:-1]), *instances,
                                        **child_kwargs)
                if result is False:
                    return result
                elif result is True:
                    default = result

            try:
                switch = self[key]
            except KeyError:
                # switch is not defined, defer to parent
                return default

            if switch.status == GLOBAL:
                return True
            elif switch.status == DISABLED:
                return False
            elif switch.status == INHERIT:
                return default

            conditions = switch.value
            # If no conditions are set, we inherit from parents
            if not conditions:
                return default

            instances = list(instances) if instances else []
            instances.extend(self.context.values())

            # check each switch to see if it can execute
            return_value = False

            for namespace, condition in conditions.iteritems():
                condition_set = registry_by_namespace.get(namespace)
                if not condition_set:
                    continue
                result = condition_set.has_active_condition(condition,
                                                            instances)
                if result is False:
                    # Any explicit exclusion wins immediately.
                    return False
                elif result is True:
                    return_value = True
        except:
            # NOTE(review): deliberately broad — a broken condition set must
            # never take the host application down; the switch just reads
            # as inactive. A bare except also swallows KeyboardInterrupt;
            # `except Exception:` would be safer — confirm before changing.
            log.exception('Error checking if switch "%s" is active', key)
            return_value = False

        # there were no matching conditions, so it must not be enabled
        return return_value

    def register(self, condition_set):
        """
        Registers a condition set with the manager.
        >>> condition_set = MyConditionSet() #doctest: +SKIP
        >>> operator.register(condition_set) #doctest: +SKIP
        """
        # Accept either an instance or a class/factory.
        if callable(condition_set):
            condition_set = condition_set()
        registry[condition_set.get_id()] = condition_set
        registry_by_namespace[condition_set.get_namespace()] = condition_set

    def unregister(self, condition_set):
        """
        Unregisters a condition set with the manager.
        >>> operator.unregister(condition_set) #doctest: +SKIP
        """
        if callable(condition_set):
            condition_set = condition_set()
        # pop(..., None) keeps this idempotent for never-registered sets.
        registry.pop(condition_set.get_id(), None)
        registry_by_namespace.pop(condition_set.get_namespace(), None)

    def get_condition_set_by_id(self, switch_id):
        """
        Given the identifier of a condition set (described in
        ConditionSet.get_id()), returns the registered instance.

        Raises KeyError if the id was never registered.
        """
        return registry[switch_id]

    def get_condition_sets(self):
        """
        Returns a generator yielding all currently registered
        ConditionSet instances.
        """
        return registry.itervalues()

    def get_all_conditions(self):
        """
        Returns a generator which yields groups of lists of conditions.
        >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
        >>>     print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
        """
        cs = self.get_condition_sets()
        # Sort by display group so related condition sets come out together.
        for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
            group = unicode(condition_set.get_group_label())
            for field in condition_set.fields.itervalues():
                yield condition_set.get_id(), group, field

    def as_request(self, user=None, ip_address=None):
        # Build a fake request carrying just the attributes condition sets
        # inspect, for checking switches outside a real request cycle.
        from .helpers import MockRequest
        return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
unregister
|
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
|
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
# MASKED: unregister function (lines 197-206)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
def unregister(self, condition_set):
    """
    Unregisters a condition set with the manager.
    >>> operator.unregister(condition_set) #doctest: +SKIP
    """
    # A class/factory may be passed instead of an instance; instantiate it.
    if callable(condition_set):
        condition_set = condition_set()
    # Drop the set from both module-level registries; pop with a default
    # keeps this a no-op for sets that were never registered.
    for mapping, lookup_key in ((registry, condition_set.get_id()),
                                (registry_by_namespace,
                                 condition_set.get_namespace())):
        mapping.pop(lookup_key, None)
| 197 | 206 |
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
get_all_conditions
|
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
|
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
# MASKED: get_all_conditions function (lines 222-233)
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
def get_all_conditions(self):
    """
    Returns a generator which yields groups of lists of conditions.
    >>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
    >>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
    """
    cs = self.get_condition_sets()
    # Sort the registered sets by their display group so related
    # conditions are yielded together, then flatten each set's fields
    # into (id, group, field) triples.
    for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
        # Python 2 text type; this module is py2-only (iteritems below).
        group = unicode(condition_set.get_group_label())
        for field in condition_set.fields.itervalues():
            yield condition_set.get_id(), group, field
| 222 | 233 |
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
|
grep_core
|
We're using the WEBVTT subtitle format. It's better than srt
because it doesn't emit line numbers and the time code is in
(hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)
|
import sys
import os
import re
import tempfile
import auto_editor
import auto_editor.vanparse as vanparse
from auto_editor.utils.log import Log
from auto_editor.ffwrapper import FFmpeg
def grep_options(parser):
    """Attach the grep subcommand's CLI flags to *parser* and return it.

    The required positional ``input`` holds the search PATTERN first,
    followed by zero or more media paths (see main()).
    """
    parser.add_argument('--no-filename', action='store_true',
        help='Never print filenames with output lines.')
    parser.add_argument('--max-count', '-m', type=int, default=None,
        help='Stop reading a file after NUM matching lines.')
    parser.add_argument('--count', '-c', action='store_true',
        help='Suppress normal output; instead print count of matching lines for each file.')
    parser.add_argument('--ignore-case', '-i', action='store_true',
        help='Ignore case distinctions for the PATTERN.')
    parser.add_argument('--timecode', action='store_true',
        help="Print the match's timecode.")
    parser.add_argument('--time', action='store_true',
        help="Print when the match happens. (Ignore ending).")
    parser.add_argument('--ffmpeg-location', default=None,
        help='Point to your custom ffmpeg file.')
    parser.add_argument('--my-ffmpeg', action='store_true',
        help='Use the ffmpeg on your PATH instead of the one packaged.')
    parser.add_argument('--help', '-h', action='store_true',
        help='Print info about the program or an option and exit.')
    parser.add_required('input', nargs='*', help='The path to a file you want inspected.')
    return parser
# stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(raw_html: str) -> str:
    """Return *raw_html* with anything that looks like an HTML tag removed.

    Uses a non-greedy ``<...>`` match, so '<i>a</i>' becomes 'a'; text
    outside tags is untouched.
    """
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', raw_html)
# MASKED: grep_core function (lines 40-101)
def main(sys_args=sys.argv[1:]):
    """Entry point: parse CLI args and grep the subtitle tracks of each media file."""
    parser = vanparse.ArgumentParser('grep', auto_editor.version,
        description='Read and match subtitle tracks in media files.',
    )
    parser = grep_options(parser)
    # Working directory for the extracted .vtt files; removed by log.cleanup().
    TEMP = tempfile.mkdtemp()
    log = Log(temp=TEMP)
    try:
        args = parser.parse_args(sys_args)
    except vanparse.ParserError as e:
        log.error(str(e))
    ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
    # args.input[0] is the search pattern; everything after it is a media path.
    media_files = args.input[1:]
    if not media_files:
        # Previously this fell through to media_files[0] and crashed with an
        # IndexError; report a proper usage error instead.
        log.error('The media file argument is missing.')
    add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename
    for media_file in media_files:
        if not os.path.exists(media_file):
            log.error(f'{media_file}: File does not exist.')
        if os.path.isdir(media_file):
            # Recurse into the directory. Join against the dirpath yielded by
            # os.walk (the original discarded it, so files in nested
            # subdirectories were joined to the top-level dir and got wrong paths).
            for dirpath, _, files in os.walk(media_file):
                for file in files:
                    if file == '.DS_Store':  # skip macOS metadata files
                        continue
                    grep_core(os.path.join(dirpath, file), add_prefix, ffmpeg, args,
                        log, TEMP)
        else:
            grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP)
    log.cleanup()
|
def grep_core(
    media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str
) -> None:
    """
    Extract the subtitle track of *media_file* as WEBVTT and print the cue
    lines that match the pattern in ``args.input[0]``.

    We're using the WEBVTT subtitle format. It's better than srt
    because it doesn't emit line numbers and the time code is in
    (hh:mm:ss.sss) instead of (hh:mm:ss,sss).
    """
    out_file = os.path.join(TEMP, 'media.vtt')
    # Drop any .vtt left over from a previous media file so ffmpeg never has
    # to overwrite an existing output file.
    if os.path.exists(out_file):
        os.remove(out_file)
    ffmpeg.run(['-i', media_file, out_file])
    flags = re.IGNORECASE if args.ignore_case else 0
    prefix = ''
    if add_prefix:
        prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
    # Use a local limit instead of mutating the shared args namespace.
    max_count = float('inf') if args.max_count is None else args.max_count
    # Cue timing line, e.g. "00:01.000 --> 00:04.000". The literal dots are
    # now escaped; the old pattern's bare '.' matched any character.
    timing_re = re.compile(r'\d*:\d\d\.\d*\s-->\s\d*:\d\d\.\d*')
    count = 0
    timecode = ''
    line_number = -1
    with open(out_file, 'r') as file:
        while True:
            line = file.readline()
            line_number += 1
            if line_number == 0:
                continue  # skip the 'WEBVTT' header line
            if not line or count >= max_count:
                break
            if line.strip() == '':
                continue
            if timing_re.match(line):
                if args.time:
                    # Keep only the cue's start time.
                    timecode = line.split('-->')[0].strip() + ' '
                else:
                    timecode = line.strip() + '; '
                continue
            line = cleanhtml(line)
            match = re.search(args.input[0], line, flags)
            line = line.strip()
            if match:
                count += 1
                if not args.count:
                    if args.timecode or args.time:
                        print(prefix + timecode + line)
                    else:
                        print(prefix + line)
    if args.count:
        print(prefix + str(count))
| 40 | 101 |
import sys
import os
import re
import tempfile
import auto_editor
import auto_editor.vanparse as vanparse
from auto_editor.utils.log import Log
from auto_editor.ffwrapper import FFmpeg
def grep_options(parser):
    """Attach all grep CLI options to *parser* and return it for chaining."""
    parser.add_argument('--no-filename', action='store_true',
        help='Never print filenames with output lines.')
    parser.add_argument('--max-count', '-m', type=int, default=None,
        help='Stop reading a file after NUM matching lines.')
    parser.add_argument('--count', '-c', action='store_true',
        help='Suppress normal output; instead print count of matching lines for each file.')
    parser.add_argument('--ignore-case', '-i', action='store_true',
        help='Ignore case distinctions for the PATTERN.')
    parser.add_argument('--timecode', action='store_true',
        help="Print the match's timecode.")
    parser.add_argument('--time', action='store_true',
        help="Print when the match happens. (Ignore ending).")
    parser.add_argument('--ffmpeg-location', default=None,
        help='Point to your custom ffmpeg file.')
    parser.add_argument('--my-ffmpeg', action='store_true',
        help='Use the ffmpeg on your PATH instead of the one packaged.')
    parser.add_argument('--help', '-h', action='store_true',
        help='Print info about the program or an option and exit.')
    # Positional: the pattern comes first, then one or more media paths.
    parser.add_required('input', nargs='*', help='The path to a file you want inspected.')
    return parser
# stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(raw_html: str) -> str:
    """Strip every HTML/XML tag from *raw_html* and return the remaining text."""
    tag_pattern = re.compile('<.*?>')
    return tag_pattern.sub('', raw_html)
def grep_core(
    media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str
) -> None:
    """
    Extract the subtitle track of *media_file* as WEBVTT (into TEMP) and
    print the cue lines matching the regex pattern in ``args.input[0]``.

    We're using the WEBVTT subtitle format. It's better than srt
    because it doesn't emit line numbers and the time code is in
    (hh:mm:ss.sss) instead of (hh:mm:ss,sss)
    """
    out_file = os.path.join(TEMP, 'media.vtt')
    # NOTE(review): out_file is reused across calls and never removed first;
    # confirm that ffmpeg.run overwrites existing outputs (e.g. passes -y).
    ffmpeg.run(['-i', media_file, out_file])
    count = 0
    flags = 0
    if args.ignore_case:
        flags = re.IGNORECASE
    prefix = ''
    if add_prefix:
        # e.g. "movie:" from "/path/movie.mp4"
        prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
    if args.max_count is None:
        # NOTE(review): this mutates the shared args namespace; harmless here
        # because inf is idempotent, but a local variable would be cleaner.
        args.max_count = float('inf')
    timecode = ''
    line_number = -1
    with open(out_file, 'r') as file:
        while True:
            line = file.readline()
            line_number += 1
            if line_number == 0:
                # Skip the 'WEBVTT' header line.
                continue
            if not line or count >= args.max_count:
                break
            if line.strip() == '':
                continue
            # Cue timing line, e.g. "00:01.000 --> 00:04.000".
            # NOTE(review): the bare '.' in this pattern matches any char;
            # escaping as r'\.' would be stricter.
            if re.match(r'\d*:\d\d.\d*\s-->\s\d*:\d\d.\d*', line):
                if args.time:
                    # Keep only the cue's start time.
                    timecode = line.split('-->')[0].strip() + ' '
                else:
                    timecode = line.strip() + '; '
                continue
            line = cleanhtml(line)
            match = re.search(args.input[0], line, flags)
            line = line.strip()
            if match:
                count += 1
                if not args.count:
                    if args.timecode or args.time:
                        print(prefix + timecode + line)
                    else:
                        print(prefix + line)
    if args.count:
        print(prefix + str(count))
def main(sys_args=sys.argv[1:]):
    """Entry point: parse CLI args and grep the subtitle tracks of each media file."""
    parser = vanparse.ArgumentParser('grep', auto_editor.version,
        description='Read and match subtitle tracks in media files.',
    )
    parser = grep_options(parser)
    # Working directory for the extracted .vtt files; removed by log.cleanup().
    TEMP = tempfile.mkdtemp()
    log = Log(temp=TEMP)
    try:
        args = parser.parse_args(sys_args)
    except vanparse.ParserError as e:
        log.error(str(e))
    ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
    # args.input[0] is the search pattern; the rest are media paths.
    # NOTE(review): if no media path is given, media_files[0] below raises
    # IndexError -- confirm whether the parser guarantees at least two inputs.
    media_files = args.input[1:]
    add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename
    for media_file in media_files:
        if not os.path.exists(media_file):
            log.error(f'{media_file}: File does not exist.')
        if os.path.isdir(media_file):
            # NOTE(review): os.walk's dirpath is discarded, so files found in
            # nested subdirectories are joined to the top-level directory and
            # get incorrect paths -- verify the intended walk depth.
            for _, _, files in os.walk(media_file):
                for file in files:
                    if file == '.DS_Store':  # skip macOS metadata files
                        continue
                    grep_core(os.path.join(media_file, file), add_prefix, ffmpeg, args,
                        log, TEMP)
        else:
            grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP)
    log.cleanup()
|
absolute_login_url
|
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
|
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
# MASKED: absolute_login_url function (lines 47-77)
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
"""
Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries
"""
url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
if not url:
raise InternalError(
"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
)
res = requests.get(url)
assert (
res.status_code == 200
), "Unable to get list of Shibboleth IDPs from {}".format(url)
all_shib_idps = []
for shib_idp in res.json():
if "entityID" not in shib_idp:
logger.warning(
f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
)
continue
idp = shib_idp["entityID"]
if len(shib_idp.get("DisplayNames", [])) > 0:
name = get_shib_idp_en_name(shib_idp["DisplayNames"])
else:
logger.warning(
f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
)
name = idp
all_shib_idps.append(
{
"idp": idp,
"name": name,
}
)
return all_shib_idps
def get_shib_idp_en_name(names):
"""
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
"""
for name in names:
if name.get("lang") == "en":
return name["value"]
return names[0]["value"]
|
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
    """
    Build the absolute login URL for a configured identity provider.

    Args:
        provider_id (str): provider to log in with; an IDP_URL_MAP key.
        fence_idp (str, optional): if provider_id is "fence"
            (multi-tenant Fence setup), fence_idp can be any of the
            providers supported by the other Fence. If not specified,
            will default to NIH login.
        shib_idp (str, optional): if provider_id is "fence" and
            fence_idp is "shibboleth", shib_idp can be any Shibboleth/
            InCommon provider. If not specified, will default to NIH
            login.

    Returns:
        str: login URL for this provider, including extra query
            parameters if fence_idp and/or shib_idp are specified.
    """
    try:
        base = config["BASE_URL"].rstrip("/")
        login_url = "{}/login/{}".format(base, IDP_URL_MAP[provider_id])
    except KeyError as e:
        # Either BASE_URL is absent from config or provider_id is unknown.
        raise InternalError("identity provider misconfigured: {}".format(str(e)))
    # Only attach the query parameters that were actually provided.
    extra_params = {}
    if fence_idp:
        extra_params["idp"] = fence_idp
    if shib_idp:
        extra_params["shib_idp"] = shib_idp
    return add_params_to_uri(login_url, extra_params)
| 47 | 77 |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
    """
    Args:
        provider_id (str): provider to log in with; an IDP_URL_MAP key.
        fence_idp (str, optional): if provider_id is "fence"
            (multi-tenant Fence setup), fence_idp can be any of the
            providers supported by the other Fence. If not specified,
            will default to NIH login.
        shib_idp (str, optional): if provider_id is "fence" and
            fence_idp is "shibboleth", shib_idp can be any Shibboleth/
            InCommon provider. If not specified, will default to NIH
            login.

    Returns:
        str: login URL for this provider, including extra query
            parameters if fence_idp and/or shib_idp are specified.

    Raises:
        InternalError: if BASE_URL is missing from config or provider_id
            is not a key of IDP_URL_MAP.
    """
    try:
        base_url = config["BASE_URL"].rstrip("/")
        login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
    except KeyError as e:
        raise InternalError("identity provider misconfigured: {}".format(str(e)))
    # Only attach the query parameters that were actually provided.
    params = {}
    if fence_idp:
        params["idp"] = fence_idp
    if shib_idp:
        params["shib_idp"] = shib_idp
    login_url = add_params_to_uri(login_url, params)
    return login_url
def provider_info(login_details):
    """
    Args:
        login_details (dict):
            { name, desc, idp, fence_idp, shib_idps, secondary }
            - "idp": a configured provider.
              Multiple options can be configured with the same idp.
            - if provider_id is "fence", "fence_idp" can be any of the
              providers supported by the other Fence. If not specified, will
              default to NIH login.
            - if provider_id is "fence" and fence_idp is "shibboleth", a
              list of "shib_idps" can be configured for InCommon login. If
              not specified, will default to NIH login.
            - Optional parameters: "desc" (description) and "secondary"
              (boolean - can be used by the frontend to display secondary
              buttons differently).

    Returns:
        dict: { name, desc, idp, urls, secondary }
            - urls: list of { name, url } dictionaries

    Raises:
        InternalError: if a requested shib_idp is unknown or "shib_idps"
            is neither a list nor "*".
    """
    info = {
        # "id" deprecated, replaced by "idp"
        "id": login_details["idp"],
        "idp": login_details["idp"],
        "name": login_details["name"],
        # "url" deprecated, replaced by "urls"
        "url": absolute_login_url(login_details["idp"]),
        "desc": login_details.get("desc", None),
        "secondary": login_details.get("secondary", False),
    }
    # for Fence multi-tenant login
    fence_idp = None
    if login_details["idp"] == "fence":
        fence_idp = login_details.get("fence_idp")
    # handle Shibboleth IDPs: InCommon login can either be configured
    # directly in this Fence, or through multi-tenant Fence
    if (
        login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
    ) and "shib_idps" in login_details:
        # get list of all available shib IDPs; cache the discovery result on
        # the app object so it is fetched at most once per process
        if not hasattr(flask.current_app, "all_shib_idps"):
            flask.current_app.all_shib_idps = get_all_shib_idps()
        requested_shib_idps = login_details["shib_idps"]
        if requested_shib_idps == "*":
            # "*" means: expose every IDP found by discovery
            shib_idps = flask.current_app.all_shib_idps
        elif isinstance(requested_shib_idps, list):
            # get the display names for each requested shib IDP
            shib_idps = []
            for requested_shib_idp in requested_shib_idps:
                shib_idp = next(
                    (
                        available_shib_idp
                        for available_shib_idp in flask.current_app.all_shib_idps
                        if available_shib_idp["idp"] == requested_shib_idp
                    ),
                    None,
                )
                if not shib_idp:
                    raise InternalError(
                        'Requested shib_idp "{}" does not exist'.format(
                            requested_shib_idp
                        )
                    )
                shib_idps.append(shib_idp)
        else:
            raise InternalError(
                'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
                    requested_shib_idps
                )
            )
        # one login URL per InCommon IDP
        info["urls"] = [
            {
                "name": shib_idp["name"],
                "url": absolute_login_url(
                    login_details["idp"], fence_idp, shib_idp["idp"]
                ),
            }
            for shib_idp in shib_idps
        ]
    # non-Shibboleth provider
    else:
        info["urls"] = [
            {
                "name": login_details["name"],
                "url": absolute_login_url(login_details["idp"], fence_idp),
            }
        ]
    return info
def get_login_providers_info():
    """
    Assemble login-provider information from configuration.

    Returns:
        tuple: (default_provider_info, all_provider_info) where each element
        is (a list of) the dict(s) produced by ``provider_info``.

    Raises:
        InternalError: if LOGIN_OPTIONS is malformed or the default IDP is
            not among the configured login options.
    """
    # default login option
    if config.get("DEFAULT_LOGIN_IDP"):
        default_idp = config["DEFAULT_LOGIN_IDP"]
    elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
        # fall back on ENABLED_IDENTITY_PROVIDERS.default
        default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
    else:
        logger.warning("DEFAULT_LOGIN_IDP not configured")
        default_idp = None
    # other login options
    if config["LOGIN_OPTIONS"]:
        login_options = config["LOGIN_OPTIONS"]
    elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
        # fall back on "providers" and convert to "login_options" format
        enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
        login_options = [
            {
                "name": details.get("name"),
                "idp": idp,
                "desc": details.get("desc"),
                "secondary": details.get("secondary"),
            }
            for idp, details in enabled_providers.items()
        ]
    else:
        logger.warning("LOGIN_OPTIONS not configured or empty")
        login_options = []
    try:
        all_provider_info = [
            provider_info(login_details) for login_details in login_options
        ]
    except KeyError as e:
        raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
    # if several login_options are defined for this default IDP, will
    # default to the first one:
    default_provider_info = next(
        (info for info in all_provider_info if info["idp"] == default_idp), None
    )
    if not default_provider_info:
        raise InternalError(
            "default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
                default_idp
            )
        )
    return default_provider_info, all_provider_info
def make_login_blueprint():
    """
    Return:
        flask.Blueprint: the blueprint used for ``/login`` endpoints

    Raises:
        ValueError: if app is not amenably configured
    """
    blueprint = flask.Blueprint("login", __name__)
    blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])

    @blueprint.route("", methods=["GET"])
    def default_login():
        """
        The default root login route.
        """
        default_provider_info, all_provider_info = get_login_providers_info()
        return flask.jsonify(
            {"default_provider": default_provider_info, "providers": all_provider_info}
        )

    # Route table: (idp key, login resource, login path, callback resource,
    # callback path). Only IDPs enabled in the config get registered.
    # Note: RAS's callback endpoint is "/ras/callback", not "/ras/login"
    # like the other IDPs.
    idp_routes = [
        ("fence", FenceLogin, "/fence", FenceCallback, "/fence/login"),
        ("google", GoogleLogin, "/google", GoogleCallback, "/google/login"),
        ("orcid", ORCIDLogin, "/orcid", ORCIDCallback, "/orcid/login"),
        ("ras", RASLogin, "/ras", RASCallback, "/ras/callback"),
        ("synapse", SynapseLogin, "/synapse", SynapseCallback, "/synapse/login"),
        ("microsoft", MicrosoftLogin, "/microsoft", MicrosoftCallback, "/microsoft/login"),
        ("okta", OktaLogin, "/okta", OktaCallback, "/okta/login"),
        ("cognito", CognitoLogin, "/cognito", CognitoCallback, "/cognito/login"),
        ("shibboleth", ShibbolethLogin, "/shib", ShibbolethCallback, "/shib/login"),
        ("cilogon", CilogonLogin, "/cilogon", CilogonCallback, "/cilogon/login"),
    ]
    configured_idps = config["OPENID_CONNECT"].keys()
    for idp, login_cls, login_path, callback_cls, callback_path in idp_routes:
        if idp in configured_idps:
            blueprint_api.add_resource(login_cls, login_path, strict_slashes=False)
            blueprint_api.add_resource(callback_cls, callback_path, strict_slashes=False)
    return blueprint
def get_all_shib_idps():
    """
    Get the list of all existing Shibboleth IDPs.
    This function only returns the information we need to generate login URLs.

    Returns:
        list: list of {"idp": "", "name": ""} dictionaries

    Raises:
        InternalError: if the discovery URL is not configured, or if the
            request to it does not return HTTP 200.
    """
    url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
    if not url:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
        )
    res = requests.get(url)
    # Raise explicitly rather than using `assert`: asserts are stripped when
    # Python runs with -O, which would silently skip this check.
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
    all_shib_idps = []
    for shib_idp in res.json():
        if "entityID" not in shib_idp:
            logger.warning(
                f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
            )
            continue
        idp = shib_idp["entityID"]
        if len(shib_idp.get("DisplayNames", [])) > 0:
            name = get_shib_idp_en_name(shib_idp["DisplayNames"])
        else:
            # Fall back on the entity ID when no display name is provided.
            logger.warning(
                f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
            )
            name = idp
        all_shib_idps.append(
            {
                "idp": idp,
                "name": name,
            }
        )
    return all_shib_idps
def get_shib_idp_en_name(names):
    """
    Returns a name in English for a Shibboleth IDP, or the first available
    name if no English name was provided.

    Args:
        names (list): list of {"lang": "", "value": ""} dictionaries

    Example:

        [
            {
                "value": "University of Chicago",
                "lang": "en"
            },
            {
                "value": "Universidad de Chicago",
                "lang": "es"
            }
        ]

    Returns:
        str: Display name to use for this Shibboleth IDP
    """
    # Sentinel distinguishes "no English entry" from an English entry whose
    # value happens to be falsy.
    _missing = object()
    english = next(
        (entry["value"] for entry in names if entry.get("lang") == "en"),
        _missing,
    )
    if english is _missing:
        return names[0]["value"]
    return english
|
provider_info
|
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
|
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
    """
    Args:
        provider_id (str): provider to log in with; an IDP_URL_MAP key.
        fence_idp (str, optional): if provider_id is "fence"
            (multi-tenant Fence setup), fence_idp can be any of the
            providers supported by the other Fence. If not specified,
            will default to NIH login.
        shib_idp (str, optional): if provider_id is "fence" and
            fence_idp is "shibboleth", shib_idp can be any Shibboleth/
            InCommon provider. If not specified, will default to NIH
            login.

    Returns:
        str: login URL for this provider, including extra query
            parameters if fence_idp and/or shib_idp are specified.

    Raises:
        InternalError: if BASE_URL is missing from config or provider_id
            is not a key of IDP_URL_MAP.
    """
    try:
        base_url = config["BASE_URL"].rstrip("/")
        login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
    except KeyError as e:
        raise InternalError("identity provider misconfigured: {}".format(str(e)))
    # Only attach the query parameters that were actually provided.
    params = {}
    if fence_idp:
        params["idp"] = fence_idp
    if shib_idp:
        params["shib_idp"] = shib_idp
    login_url = add_params_to_uri(login_url, params)
    return login_url
# MASKED: provider_info function (lines 80-174)
def get_login_providers_info():
    """
    Assemble login-provider information for the ``/login`` endpoint.

    Reads LOGIN_OPTIONS and DEFAULT_LOGIN_IDP from the configuration,
    falling back to the legacy ENABLED_IDENTITY_PROVIDERS block when the
    newer settings are absent.

    Returns:
        tuple: (default_provider_info, all_provider_info) where each
            provider info is the dict produced by ``provider_info``.

    Raises:
        InternalError: if LOGIN_OPTIONS entries are missing required keys,
            or if the default IDP is not present among the login options.
    """
    legacy_config = config.get("ENABLED_IDENTITY_PROVIDERS", {})

    # Resolve the default login option, preferring the modern setting.
    default_idp = config.get("DEFAULT_LOGIN_IDP")
    if not default_idp:
        if "default" in legacy_config:
            # fall back on ENABLED_IDENTITY_PROVIDERS.default
            default_idp = legacy_config["default"]
        else:
            logger.warning("DEFAULT_LOGIN_IDP not configured")
            default_idp = None

    # Resolve the list of login options, preferring the modern setting.
    login_options = config["LOGIN_OPTIONS"]
    if not login_options:
        if "providers" in legacy_config:
            # fall back on "providers" and convert to "login_options" format
            login_options = [
                {
                    "name": details.get("name"),
                    "idp": idp,
                    "desc": details.get("desc"),
                    "secondary": details.get("secondary"),
                }
                for idp, details in legacy_config["providers"].items()
            ]
        else:
            logger.warning("LOGIN_OPTIONS not configured or empty")
            login_options = []

    try:
        all_provider_info = [
            provider_info(login_details) for login_details in login_options
        ]
    except KeyError as e:
        raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))

    # if several login_options are defined for this default IDP, will
    # default to the first one:
    for info in all_provider_info:
        if info["idp"] == default_idp:
            default_provider_info = info
            break
    else:
        raise InternalError(
            "default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
                default_idp
            )
        )
    return default_provider_info, all_provider_info
def make_login_blueprint():
    """
    Construct the blueprint that serves all ``/login`` endpoints.

    Return:
        flask.Blueprint: the blueprint used for ``/login`` endpoints
    Raises:
        ValueError: if app is not amenably configured
    """
    blueprint = flask.Blueprint("login", __name__)
    blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])

    @blueprint.route("", methods=["GET"])
    def default_login():
        """
        The default root login route.
        """
        default_provider_info, all_provider_info = get_login_providers_info()
        return flask.jsonify(
            {"default_provider": default_provider_info, "providers": all_provider_info}
        )

    # Per-IDP route table: (login resource, login path, callback resource,
    # callback path). Note the RAS callback is "/ras/callback", not
    # "/ras/login" like the other IDPs.
    idp_routes = {
        "fence": (FenceLogin, "/fence", FenceCallback, "/fence/login"),
        "google": (GoogleLogin, "/google", GoogleCallback, "/google/login"),
        "orcid": (ORCIDLogin, "/orcid", ORCIDCallback, "/orcid/login"),
        "ras": (RASLogin, "/ras", RASCallback, "/ras/callback"),
        "synapse": (SynapseLogin, "/synapse", SynapseCallback, "/synapse/login"),
        "microsoft": (
            MicrosoftLogin,
            "/microsoft",
            MicrosoftCallback,
            "/microsoft/login",
        ),
        "okta": (OktaLogin, "/okta", OktaCallback, "/okta/login"),
        "cognito": (CognitoLogin, "/cognito", CognitoCallback, "/cognito/login"),
        "shibboleth": (ShibbolethLogin, "/shib", ShibbolethCallback, "/shib/login"),
        "cilogon": (CilogonLogin, "/cilogon", CilogonCallback, "/cilogon/login"),
    }

    # Register login + callback routes only for IDPs enabled in the config.
    configured_idps = config["OPENID_CONNECT"].keys()
    for idp, (login_cls, login_path, callback_cls, callback_path) in idp_routes.items():
        if idp not in configured_idps:
            continue
        blueprint_api.add_resource(login_cls, login_path, strict_slashes=False)
        blueprint_api.add_resource(callback_cls, callback_path, strict_slashes=False)

    return blueprint
def get_all_shib_idps():
    """
    Get the list of all existing Shibboleth IDPs.
    This function only returns the information we need to generate login URLs.

    Returns:
        list: list of {"idp": "", "name": ""} dictionaries

    Raises:
        InternalError: if the discovery URL is not configured, or if the
            discovery endpoint does not answer with HTTP 200.
    """
    url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
    if not url:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
        )
    res = requests.get(url)
    # Previously an `assert`, which is silently stripped when Python runs
    # with -O; raise explicitly so a failing discovery endpoint always
    # surfaces instead of letting res.json() fail obscurely below.
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
    all_shib_idps = []
    for shib_idp in res.json():
        # Skip malformed entries rather than failing the whole discovery.
        if "entityID" not in shib_idp:
            logger.warning(
                f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
            )
            continue
        idp = shib_idp["entityID"]
        if len(shib_idp.get("DisplayNames", [])) > 0:
            name = get_shib_idp_en_name(shib_idp["DisplayNames"])
        else:
            # No display name available; fall back to the entity ID.
            logger.warning(
                f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
            )
            name = idp
        all_shib_idps.append(
            {
                "idp": idp,
                "name": name,
            }
        )
    return all_shib_idps
def get_shib_idp_en_name(names):
    """
    Pick the English display name for a Shibboleth IDP, falling back to
    the first listed name when no English ("en") entry exists.

    Args:
        names (list): list of {"lang": "", "value": ""} dictionaries

    Example:
        [
            {"value": "University of Chicago", "lang": "en"},
            {"value": "Universidad de Chicago", "lang": "es"},
        ]

    Returns:
        str: Display name to use for this Shibboleth IDP
    """
    # Sentinel distinguishes "no English entry" from an entry whose value
    # happens to be falsy.
    missing = object()
    english_value = next(
        (entry["value"] for entry in names if entry.get("lang") == "en"),
        missing,
    )
    if english_value is not missing:
        return english_value
    return names[0]["value"]
|
def provider_info(login_details):
    """
    Args:
        login_details (dict):
            { name, desc, idp, fence_idp, shib_idps, secondary }
            - "idp": a configured provider.
              Multiple options can be configured with the same idp.
            - if provider_id is "fence", "fence_idp" can be any of the
              providers supported by the other Fence. If not specified, will
              default to NIH login.
            - if provider_id is "fence" and fence_idp is "shibboleth", a
              list of "shib_idps" can be configured for InCommon login. If
              not specified, will default to NIH login.
            - Optional parameters: "desc" (description) and "secondary"
              (boolean - can be used by the frontend to display secondary
              buttons differently).
    Returns:
        dict: { name, desc, idp, urls, secondary }
        - urls: list of { name, url } dictionaries
    Raises:
        InternalError: if a requested shib_idp is unknown, or if
            "shib_idps" is neither a list nor "*".
    """
    info = {
        # "id" deprecated, replaced by "idp"
        "id": login_details["idp"],
        "idp": login_details["idp"],
        "name": login_details["name"],
        # "url" deprecated, replaced by "urls"
        "url": absolute_login_url(login_details["idp"]),
        "desc": login_details.get("desc", None),
        "secondary": login_details.get("secondary", False),
    }
    # for Fence multi-tenant login
    fence_idp = None
    if login_details["idp"] == "fence":
        fence_idp = login_details.get("fence_idp")
    # handle Shibboleth IDPs: InCommon login can either be configured
    # directly in this Fence, or through multi-tenant Fence
    if (
        login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
    ) and "shib_idps" in login_details:
        # get list of all available shib IDPs
        # NOTE: cached on the Flask app object for the life of the process;
        # the discovery list is never refreshed after the first lookup.
        if not hasattr(flask.current_app, "all_shib_idps"):
            flask.current_app.all_shib_idps = get_all_shib_idps()
        requested_shib_idps = login_details["shib_idps"]
        if requested_shib_idps == "*":
            # wildcard: expose every IDP from the discovery service
            shib_idps = flask.current_app.all_shib_idps
        elif isinstance(requested_shib_idps, list):
            # get the display names for each requested shib IDP
            shib_idps = []
            for requested_shib_idp in requested_shib_idps:
                # linear scan of the cached discovery list for this entity ID
                shib_idp = next(
                    (
                        available_shib_idp
                        for available_shib_idp in flask.current_app.all_shib_idps
                        if available_shib_idp["idp"] == requested_shib_idp
                    ),
                    None,
                )
                if not shib_idp:
                    raise InternalError(
                        'Requested shib_idp "{}" does not exist'.format(
                            requested_shib_idp
                        )
                    )
                shib_idps.append(shib_idp)
        else:
            raise InternalError(
                'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
                    requested_shib_idps
                )
            )
        # one login URL per shib IDP, each carrying the shib_idp query param
        info["urls"] = [
            {
                "name": shib_idp["name"],
                "url": absolute_login_url(
                    login_details["idp"], fence_idp, shib_idp["idp"]
                ),
            }
            for shib_idp in shib_idps
        ]
    # non-Shibboleth provider
    else:
        info["urls"] = [
            {
                "name": login_details["name"],
                "url": absolute_login_url(login_details["idp"], fence_idp),
            }
        ]
    return info
| 80 | 174 |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
"""
Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries
"""
url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
if not url:
raise InternalError(
"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
)
res = requests.get(url)
assert (
res.status_code == 200
), "Unable to get list of Shibboleth IDPs from {}".format(url)
all_shib_idps = []
for shib_idp in res.json():
if "entityID" not in shib_idp:
logger.warning(
f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
)
continue
idp = shib_idp["entityID"]
if len(shib_idp.get("DisplayNames", [])) > 0:
name = get_shib_idp_en_name(shib_idp["DisplayNames"])
else:
logger.warning(
f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
)
name = idp
all_shib_idps.append(
{
"idp": idp,
"name": name,
}
)
return all_shib_idps
def get_shib_idp_en_name(names):
"""
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
"""
for name in names:
if name.get("lang") == "en":
return name["value"]
return names[0]["value"]
|
make_login_blueprint
|
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
|
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
# MASKED: make_login_blueprint function (lines 229-307)
def get_all_shib_idps():
"""
Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries
"""
url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
if not url:
raise InternalError(
"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
)
res = requests.get(url)
assert (
res.status_code == 200
), "Unable to get list of Shibboleth IDPs from {}".format(url)
all_shib_idps = []
for shib_idp in res.json():
if "entityID" not in shib_idp:
logger.warning(
f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
)
continue
idp = shib_idp["entityID"]
if len(shib_idp.get("DisplayNames", [])) > 0:
name = get_shib_idp_en_name(shib_idp["DisplayNames"])
else:
logger.warning(
f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
)
name = idp
all_shib_idps.append(
{
"idp": idp,
"name": name,
}
)
return all_shib_idps
def get_shib_idp_en_name(names):
"""
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
"""
for name in names:
if name.get("lang") == "en":
return name["value"]
return names[0]["value"]
|
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
| 229 | 307 |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
    """
    Build the absolute login URL for a configured identity provider.

    Args:
        provider_id (str): provider to log in with; an IDP_URL_MAP key.
        fence_idp (str, optional): if provider_id is "fence"
            (multi-tenant Fence setup), fence_idp can be any of the
            providers supported by the other Fence. If not specified,
            will default to NIH login.
        shib_idp (str, optional): if provider_id is "fence" and
            fence_idp is "shibboleth", shib_idp can be any Shibboleth/
            InCommon provider. If not specified, will default to NIH
            login.

    Returns:
        str: login URL for this provider, including extra query
        parameters if fence_idp and/or shib_idp are specified.

    Raises:
        InternalError: if BASE_URL is not configured or provider_id is
            not a known IDP_URL_MAP key.
    """
    try:
        base = config["BASE_URL"].rstrip("/")
        path = IDP_URL_MAP[provider_id]
    except KeyError as err:
        raise InternalError("identity provider misconfigured: {}".format(str(err)))
    # Only include query parameters that were actually provided.
    extra_params = {
        key: value
        for key, value in (("idp", fence_idp), ("shib_idp", shib_idp))
        if value
    }
    return add_params_to_uri("{}/login/{}".format(base, path), extra_params)
def provider_info(login_details):
    """
    Build the provider-info dictionary for one configured login option.

    Args:
        login_details (dict):
            { name, desc, idp, fence_idp, shib_idps, secondary }
            - "idp": a configured provider.
              Multiple options can be configured with the same idp.
            - if provider_id is "fence", "fence_idp" can be any of the
              providers supported by the other Fence. If not specified, will
              default to NIH login.
            - if provider_id is "fence" and fence_idp is "shibboleth", a
              list of "shib_idps" can be configured for InCommon login. If
              not specified, will default to NIH login.
            - Optional parameters: "desc" (description) and "secondary"
              (boolean - can be used by the frontend to display secondary
              buttons differently).

    Returns:
        dict: { name, desc, idp, urls, secondary }
        - urls: list of { name, url } dictionaries

    Raises:
        InternalError: if a requested shib_idp is not in the discovered
            IDP list, or if "shib_idps" is neither a list nor "*".
        KeyError: if required keys ("idp", "name") are missing from
            login_details (propagated to the caller).
    """
    info = {
        # "id" deprecated, replaced by "idp"
        "id": login_details["idp"],
        "idp": login_details["idp"],
        "name": login_details["name"],
        # "url" deprecated, replaced by "urls"
        "url": absolute_login_url(login_details["idp"]),
        "desc": login_details.get("desc", None),
        "secondary": login_details.get("secondary", False),
    }
    # for Fence multi-tenant login
    fence_idp = None
    if login_details["idp"] == "fence":
        fence_idp = login_details.get("fence_idp")
    # handle Shibboleth IDPs: InCommon login can either be configured
    # directly in this Fence, or through multi-tenant Fence
    if (
        login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
    ) and "shib_idps" in login_details:
        # get list of all available shib IDPs; cached on the app object so
        # the discovery endpoint is only fetched once per process
        if not hasattr(flask.current_app, "all_shib_idps"):
            flask.current_app.all_shib_idps = get_all_shib_idps()
        requested_shib_idps = login_details["shib_idps"]
        if requested_shib_idps == "*":
            # wildcard: expose every discovered shib IDP
            shib_idps = flask.current_app.all_shib_idps
        elif isinstance(requested_shib_idps, list):
            # get the display names for each requested shib IDP
            shib_idps = []
            for requested_shib_idp in requested_shib_idps:
                # look up the requested IDP among the discovered ones;
                # None if it does not exist
                shib_idp = next(
                    (
                        available_shib_idp
                        for available_shib_idp in flask.current_app.all_shib_idps
                        if available_shib_idp["idp"] == requested_shib_idp
                    ),
                    None,
                )
                if not shib_idp:
                    raise InternalError(
                        'Requested shib_idp "{}" does not exist'.format(
                            requested_shib_idp
                        )
                    )
                shib_idps.append(shib_idp)
        else:
            raise InternalError(
                'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
                    requested_shib_idps
                )
            )
        # one login URL per shib IDP, each carrying the shib_idp query param
        info["urls"] = [
            {
                "name": shib_idp["name"],
                "url": absolute_login_url(
                    login_details["idp"], fence_idp, shib_idp["idp"]
                ),
            }
            for shib_idp in shib_idps
        ]
    # non-Shibboleth provider
    else:
        info["urls"] = [
            {
                "name": login_details["name"],
                "url": absolute_login_url(login_details["idp"], fence_idp),
            }
        ]
    return info
def get_login_providers_info():
    """
    Resolve the configured login options and the default provider.

    Falls back on the legacy ENABLED_IDENTITY_PROVIDERS setting when
    DEFAULT_LOGIN_IDP / LOGIN_OPTIONS are not configured.

    Returns:
        tuple: (default_provider_info, all_provider_info) where each
        element has the shape produced by ``provider_info``.

    Raises:
        InternalError: if LOGIN_OPTIONS is malformed or the default IDP
            is not among the configured login options.
    """
    # default login option: DEFAULT_LOGIN_IDP, else the legacy
    # ENABLED_IDENTITY_PROVIDERS.default, else None (with a warning)
    legacy_idps = config.get("ENABLED_IDENTITY_PROVIDERS", {})
    default_idp = config.get("DEFAULT_LOGIN_IDP")
    if not default_idp:
        if "default" in legacy_idps:
            default_idp = legacy_idps["default"]
        else:
            logger.warning("DEFAULT_LOGIN_IDP not configured")
            default_idp = None
    # other login options: LOGIN_OPTIONS, else the legacy "providers"
    # mapping converted to the LOGIN_OPTIONS format, else empty
    if config["LOGIN_OPTIONS"]:
        login_options = config["LOGIN_OPTIONS"]
    elif "providers" in legacy_idps:
        login_options = [
            {
                "name": details.get("name"),
                "idp": idp,
                "desc": details.get("desc"),
                "secondary": details.get("secondary"),
            }
            for idp, details in legacy_idps["providers"].items()
        ]
    else:
        logger.warning("LOGIN_OPTIONS not configured or empty")
        login_options = []
    try:
        all_provider_info = [provider_info(option) for option in login_options]
    except KeyError as e:
        raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
    # several login_options may be defined for the default IDP; use the
    # first match
    default_provider_info = None
    for candidate in all_provider_info:
        if candidate["idp"] == default_idp:
            default_provider_info = candidate
            break
    if not default_provider_info:
        raise InternalError(
            "default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
                default_idp
            )
        )
    return default_provider_info, all_provider_info
def make_login_blueprint():
    """
    Create the blueprint used for ``/login`` endpoints, registering login
    and callback routes for every IDP enabled in the config.

    Return:
        flask.Blueprint: the blueprint used for ``/login`` endpoints

    Raises:
        ValueError: if app is not amenably configured
    """
    blueprint = flask.Blueprint("login", __name__)
    blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])

    @blueprint.route("", methods=["GET"])
    def default_login():
        """
        The default root login route.
        """
        default_provider_info, all_provider_info = get_login_providers_info()
        return flask.jsonify(
            {"default_provider": default_provider_info, "providers": all_provider_info}
        )

    # Route table: IDP -> (login resource, login path, callback resource,
    # callback path). Insertion order matches the original registration
    # order. Note that the RAS callback endpoint is "/ras/callback", not
    # "/ras/login" like other IDPs.
    idp_routes = {
        "fence": (FenceLogin, "/fence", FenceCallback, "/fence/login"),
        "google": (GoogleLogin, "/google", GoogleCallback, "/google/login"),
        "orcid": (ORCIDLogin, "/orcid", ORCIDCallback, "/orcid/login"),
        "ras": (RASLogin, "/ras", RASCallback, "/ras/callback"),
        "synapse": (SynapseLogin, "/synapse", SynapseCallback, "/synapse/login"),
        "microsoft": (
            MicrosoftLogin,
            "/microsoft",
            MicrosoftCallback,
            "/microsoft/login",
        ),
        "okta": (OktaLogin, "/okta", OktaCallback, "/okta/login"),
        "cognito": (CognitoLogin, "/cognito", CognitoCallback, "/cognito/login"),
        "shibboleth": (ShibbolethLogin, "/shib", ShibbolethCallback, "/shib/login"),
        "cilogon": (CilogonLogin, "/cilogon", CilogonCallback, "/cilogon/login"),
    }
    # Add identity provider login routes for IDPs enabled in the config.
    configured_idps = config["OPENID_CONNECT"].keys()
    for idp, (login_cls, login_path, callback_cls, callback_path) in idp_routes.items():
        if idp in configured_idps:
            blueprint_api.add_resource(login_cls, login_path, strict_slashes=False)
            blueprint_api.add_resource(
                callback_cls, callback_path, strict_slashes=False
            )
    return blueprint
def get_all_shib_idps():
    """
    Get the list of all existing Shibboleth IDPs.
    This function only returns the information we need to generate login URLs.

    Returns:
        list: list of {"idp": "", "name": ""} dictionaries

    Raises:
        InternalError: if the discovery URL is not configured, or if it
            does not respond with HTTP 200.
    """
    url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
    if not url:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
        )
    res = requests.get(url)
    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this check.
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
    all_shib_idps = []
    for shib_idp in res.json():
        if "entityID" not in shib_idp:
            logger.warning(
                f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
            )
            continue
        idp = shib_idp["entityID"]
        if len(shib_idp.get("DisplayNames", [])) > 0:
            name = get_shib_idp_en_name(shib_idp["DisplayNames"])
        else:
            logger.warning(
                f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
            )
            name = idp
        all_shib_idps.append(
            {
                "idp": idp,
                "name": name,
            }
        )
    return all_shib_idps
def get_shib_idp_en_name(names):
    """
    Pick the English display name for a Shibboleth IDP, falling back on
    the first available name if no English one was provided.

    Args:
        names (list): list of {"lang": "", "value": ""} dictionaries

    Example:
    [
        {
            "value": "University of Chicago",
            "lang": "en"
        },
        {
            "value": "Universidad de Chicago",
            "lang": "es"
        }
    ]

    Returns:
        str: Display name to use for this Shibboleth IDP
    """
    return next(
        (entry["value"] for entry in names if entry.get("lang") == "en"),
        names[0]["value"],
    )
|
get_shib_idp_en_name
|
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
|
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
    """
    Get the list of all existing Shibboleth IDPs.
    This function only returns the information we need to generate login URLs.

    Returns:
        list: list of {"idp": "", "name": ""} dictionaries

    Raises:
        InternalError: if the discovery URL is not configured, or if it
            does not respond with HTTP 200.
    """
    url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
    if not url:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
        )
    res = requests.get(url)
    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this check.
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
    all_shib_idps = []
    for shib_idp in res.json():
        if "entityID" not in shib_idp:
            logger.warning(
                f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
            )
            continue
        idp = shib_idp["entityID"]
        if len(shib_idp.get("DisplayNames", [])) > 0:
            name = get_shib_idp_en_name(shib_idp["DisplayNames"])
        else:
            logger.warning(
                f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
            )
            name = idp
        all_shib_idps.append(
            {
                "idp": idp,
                "name": name,
            }
        )
    return all_shib_idps
# MASKED: get_shib_idp_en_name function (lines 352-377)
|
def get_shib_idp_en_name(names):
    """
    Pick the English display name for a Shibboleth IDP, falling back on
    the first available name if no English one was provided.

    Args:
        names (list): list of {"lang": "", "value": ""} dictionaries

    Example:
    [
        {
            "value": "University of Chicago",
            "lang": "en"
        },
        {
            "value": "Universidad de Chicago",
            "lang": "es"
        }
    ]

    Returns:
        str: Display name to use for this Shibboleth IDP
    """
    return next(
        (entry["value"] for entry in names if entry.get("lang") == "en"),
        names[0]["value"],
    )
| 352 | 377 |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
    """
    Get the list of all existing Shibboleth IDPs.
    This function only returns the information we need to generate login URLs.

    Returns:
        list: list of {"idp": "", "name": ""} dictionaries

    Raises:
        InternalError: if the discovery URL is not configured, or if it
            does not respond with HTTP 200.
    """
    url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
    if not url:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
        )
    res = requests.get(url)
    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this check.
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
    all_shib_idps = []
    for shib_idp in res.json():
        if "entityID" not in shib_idp:
            logger.warning(
                f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
            )
            continue
        idp = shib_idp["entityID"]
        if len(shib_idp.get("DisplayNames", [])) > 0:
            name = get_shib_idp_en_name(shib_idp["DisplayNames"])
        else:
            logger.warning(
                f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
            )
            name = idp
        all_shib_idps.append(
            {
                "idp": idp,
                "name": name,
            }
        )
    return all_shib_idps
def get_shib_idp_en_name(names):
    """
    Return the display name to use for a Shibboleth IDP: the first name whose
    language is English, or the first listed name when no English entry exists.

    Args:
        names (list): list of {"lang": "", "value": ""} dictionaries

    Example:
        [
            {
                "value": "University of Chicago",
                "lang": "en"
            },
            {
                "value": "Universidad de Chicago",
                "lang": "es"
            }
        ]

    Returns:
        str: Display name to use for this Shibboleth IDP
    """
    english_values = (entry["value"] for entry in names if entry.get("lang") == "en")
    try:
        return next(english_values)
    except StopIteration:
        # No English entry: fall back to whatever name is listed first.
        return names[0]["value"]
|
get
|
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpnSiteArgs', 'VpnSite']
@pulumi.input_type
class VpnSiteArgs:
    """The set of arguments for constructing a VpnSite resource."""
    # NOTE: generated by the Pulumi SDK Generator. The @pulumi.input_type
    # decorator introspects the property getters/setters below, so their names
    # and the pulumi.get/pulumi.set calls are part of the contract.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,
                 bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,
                 device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_security_site: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 site_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
                 vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,
                 vpn_site_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VpnSite resource.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
        :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
        :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
        :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
        :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
        :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
        :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
        """
        # resource_group_name is the only required argument.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly provided, so
        # that unset values stay absent instead of being stored as None.
        if address_space is not None:
            pulumi.set(__self__, "address_space", address_space)
        if bgp_properties is not None:
            pulumi.set(__self__, "bgp_properties", bgp_properties)
        if device_properties is not None:
            pulumi.set(__self__, "device_properties", device_properties)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if is_security_site is not None:
            pulumi.set(__self__, "is_security_site", is_security_site)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if site_key is not None:
            pulumi.set(__self__, "site_key", site_key)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_wan is not None:
            pulumi.set(__self__, "virtual_wan", virtual_wan)
        if vpn_site_links is not None:
            pulumi.set(__self__, "vpn_site_links", vpn_site_links)
        if vpn_site_name is not None:
            pulumi.set(__self__, "vpn_site_name", vpn_site_name)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name of the VpnSite.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="addressSpace")
    def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
        """
        The AddressSpace that contains an array of IP address ranges.
        """
        return pulumi.get(self, "address_space")

    @address_space.setter
    def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):
        pulumi.set(self, "address_space", value)

    @property
    @pulumi.getter(name="bgpProperties")
    def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
        """
        The set of bgp properties.
        """
        return pulumi.get(self, "bgp_properties")

    @bgp_properties.setter
    def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
        pulumi.set(self, "bgp_properties", value)

    @property
    @pulumi.getter(name="deviceProperties")
    def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
        """
        The device properties.
        """
        return pulumi.get(self, "device_properties")

    @device_properties.setter
    def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):
        pulumi.set(self, "device_properties", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The ip-address for the vpn-site.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="isSecuritySite")
    def is_security_site(self) -> Optional[pulumi.Input[bool]]:
        """
        IsSecuritySite flag.
        """
        return pulumi.get(self, "is_security_site")

    @is_security_site.setter
    def is_security_site(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_security_site", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="siteKey")
    def site_key(self) -> Optional[pulumi.Input[str]]:
        """
        The key for vpn-site that can be used for connections.
        """
        return pulumi.get(self, "site_key")

    @site_key.setter
    def site_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "site_key", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The VirtualWAN to which the vpnSite belongs.
        """
        return pulumi.get(self, "virtual_wan")

    @virtual_wan.setter
    def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "virtual_wan", value)

    @property
    @pulumi.getter(name="vpnSiteLinks")
    def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
        """
        List of all vpn site links.
        """
        return pulumi.get(self, "vpn_site_links")

    @vpn_site_links.setter
    def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):
        pulumi.set(self, "vpn_site_links", value)

    @property
    @pulumi.getter(name="vpnSiteName")
    def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the VpnSite being created or updated.
        """
        return pulumi.get(self, "vpn_site_name")

    @vpn_site_name.setter
    def vpn_site_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vpn_site_name", value)
class VpnSite(pulumi.CustomResource):
    """VpnSite Resource (generated by the Pulumi SDK Generator)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
                 bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
                 device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_security_site: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 site_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
                 vpn_site_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        VpnSite Resource.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
        :param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
        :param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
        :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
        :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
        :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VpnSiteArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        VpnSite Resource.
        :param str resource_name: The name of the resource.
        :param VpnSiteArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: a single VpnSiteArgs bundle
        # or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
                       bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
                       device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
                       id: Optional[pulumi.Input[str]] = None,
                       ip_address: Optional[pulumi.Input[str]] = None,
                       is_security_site: Optional[pulumi.Input[bool]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       site_key: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
                       vpn_site_name: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may not be supplied by the caller.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            # __new__ bypasses VpnSiteArgs.__init__ so props can be assigned directly.
            __props__ = VpnSiteArgs.__new__(VpnSiteArgs)
            __props__.__dict__["address_space"] = address_space
            __props__.__dict__["bgp_properties"] = bgp_properties
            __props__.__dict__["device_properties"] = device_properties
            __props__.__dict__["id"] = id
            __props__.__dict__["ip_address"] = ip_address
            __props__.__dict__["is_security_site"] = is_security_site
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["site_key"] = site_key
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_wan"] = virtual_wan
            __props__.__dict__["vpn_site_links"] = vpn_site_links
            __props__.__dict__["vpn_site_name"] = vpn_site_name
            # Output-only fields are unknown until the provider resolves them.
            __props__.__dict__["etag"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        # Aliases let the engine treat the same resource under other API
        # versions / package names as the same URN (no replacement on upgrade).
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"),
                                            pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(VpnSite, __self__).__init__(
            'azure-native:network/v20200301:VpnSite',
            resource_name,
            __props__,
            opts)
# MASKED: get function (lines 344-374)
    # --- Read-only outputs: resolved by the provider after create/update/get ---
    @property
    @pulumi.getter(name="addressSpace")
    def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
        """
        The AddressSpace that contains an array of IP address ranges.
        """
        return pulumi.get(self, "address_space")

    @property
    @pulumi.getter(name="bgpProperties")
    def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
        """
        The set of bgp properties.
        """
        return pulumi.get(self, "bgp_properties")

    @property
    @pulumi.getter(name="deviceProperties")
    def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
        """
        The device properties.
        """
        return pulumi.get(self, "device_properties")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[Optional[str]]:
        """
        The ip-address for the vpn-site.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="isSecuritySite")
    def is_security_site(self) -> pulumi.Output[Optional[bool]]:
        """
        IsSecuritySite flag.
        """
        return pulumi.get(self, "is_security_site")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the VPN site resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="siteKey")
    def site_key(self) -> pulumi.Output[Optional[str]]:
        """
        The key for vpn-site that can be used for connections.
        """
        return pulumi.get(self, "site_key")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The VirtualWAN to which the vpnSite belongs.
        """
        return pulumi.get(self, "virtual_wan")

    @property
    @pulumi.getter(name="vpnSiteLinks")
    def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
        """
        List of all vpn site links.
        """
        return pulumi.get(self, "vpn_site_links")
|
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = None
__props__.__dict__["bgp_properties"] = None
__props__.__dict__["device_properties"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["is_security_site"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["site_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_wan"] = None
__props__.__dict__["vpn_site_links"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
| 344 | 374 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpnSiteArgs', 'VpnSite']
@pulumi.input_type
class VpnSiteArgs:
    """The set of arguments for constructing a VpnSite resource."""
    # NOTE: generated by the Pulumi SDK Generator. The @pulumi.input_type
    # decorator introspects the property getters/setters below, so their names
    # and the pulumi.get/pulumi.set calls are part of the contract.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,
                 bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,
                 device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_security_site: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 site_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
                 vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,
                 vpn_site_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a VpnSite resource.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
        :param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
        :param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
        :param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
        :param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
        :param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
        :param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
        """
        # resource_group_name is the only required argument.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only recorded when explicitly provided, so
        # that unset values stay absent instead of being stored as None.
        if address_space is not None:
            pulumi.set(__self__, "address_space", address_space)
        if bgp_properties is not None:
            pulumi.set(__self__, "bgp_properties", bgp_properties)
        if device_properties is not None:
            pulumi.set(__self__, "device_properties", device_properties)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if is_security_site is not None:
            pulumi.set(__self__, "is_security_site", is_security_site)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if site_key is not None:
            pulumi.set(__self__, "site_key", site_key)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_wan is not None:
            pulumi.set(__self__, "virtual_wan", virtual_wan)
        if vpn_site_links is not None:
            pulumi.set(__self__, "vpn_site_links", vpn_site_links)
        if vpn_site_name is not None:
            pulumi.set(__self__, "vpn_site_name", vpn_site_name)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name of the VpnSite.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="addressSpace")
    def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
        """
        The AddressSpace that contains an array of IP address ranges.
        """
        return pulumi.get(self, "address_space")

    @address_space.setter
    def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):
        pulumi.set(self, "address_space", value)

    @property
    @pulumi.getter(name="bgpProperties")
    def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
        """
        The set of bgp properties.
        """
        return pulumi.get(self, "bgp_properties")

    @bgp_properties.setter
    def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
        pulumi.set(self, "bgp_properties", value)

    @property
    @pulumi.getter(name="deviceProperties")
    def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
        """
        The device properties.
        """
        return pulumi.get(self, "device_properties")

    @device_properties.setter
    def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):
        pulumi.set(self, "device_properties", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The ip-address for the vpn-site.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="isSecuritySite")
    def is_security_site(self) -> Optional[pulumi.Input[bool]]:
        """
        IsSecuritySite flag.
        """
        return pulumi.get(self, "is_security_site")

    @is_security_site.setter
    def is_security_site(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_security_site", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="siteKey")
    def site_key(self) -> Optional[pulumi.Input[str]]:
        """
        The key for vpn-site that can be used for connections.
        """
        return pulumi.get(self, "site_key")

    @site_key.setter
    def site_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "site_key", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The VirtualWAN to which the vpnSite belongs.
        """
        return pulumi.get(self, "virtual_wan")

    @virtual_wan.setter
    def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "virtual_wan", value)

    @property
    @pulumi.getter(name="vpnSiteLinks")
    def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
        """
        List of all vpn site links.
        """
        return pulumi.get(self, "vpn_site_links")

    @vpn_site_links.setter
    def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):
        pulumi.set(self, "vpn_site_links", value)

    @property
    @pulumi.getter(name="vpnSiteName")
    def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the VpnSite being created or updated.
        """
        return pulumi.get(self, "vpn_site_name")

    @vpn_site_name.setter
    def vpn_site_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vpn_site_name", value)
class VpnSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param VpnSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
                 bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
                 device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_security_site: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 site_key: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
                 vpn_site_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared implementation behind both public ``__init__`` overloads.

        Validates options, builds the property bag (only when creating a new
        resource, i.e. ``opts.id`` is unset), registers aliases for the other
        API versions of this resource, then registers the resource itself.
        """
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: fill the property bag from the keyword
            # arguments; __props__ may only be supplied together with opts.id.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VpnSiteArgs.__new__(VpnSiteArgs)
            __props__.__dict__["address_space"] = address_space
            __props__.__dict__["bgp_properties"] = bgp_properties
            __props__.__dict__["device_properties"] = device_properties
            __props__.__dict__["id"] = id
            __props__.__dict__["ip_address"] = ip_address
            __props__.__dict__["is_security_site"] = is_security_site
            __props__.__dict__["location"] = location
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["site_key"] = site_key
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_wan"] = virtual_wan
            __props__.__dict__["vpn_site_links"] = vpn_site_links
            __props__.__dict__["vpn_site_name"] = vpn_site_name
            # Output-only properties start unset; the provider fills them in.
            __props__.__dict__["etag"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["type"] = None
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(VpnSite, __self__).__init__(
            'azure-native:network/v20200301:VpnSite',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = None
__props__.__dict__["bgp_properties"] = None
__props__.__dict__["device_properties"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["is_security_site"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["site_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_wan"] = None
__props__.__dict__["vpn_site_links"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressSpace")
    def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
        """
        The AddressSpace that contains an array of IP address ranges.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "address_space")
    @property
    @pulumi.getter(name="bgpProperties")
    def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
        """
        The set of bgp properties.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "bgp_properties")
    @property
    @pulumi.getter(name="deviceProperties")
    def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
        """
        The device properties.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "device_properties")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        Non-optional output: always set by the service.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[Optional[str]]:
        """
        The ip-address for the vpn-site.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter(name="isSecuritySite")
    def is_security_site(self) -> pulumi.Output[Optional[bool]]:
        """
        IsSecuritySite flag.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "is_security_site")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        Non-optional output: always set by the service.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        Non-optional output: always set by the service.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the VPN site resource.
        Non-optional output: always set by the service.
        """
        return pulumi.get(self, "provisioning_state")
    @property
    @pulumi.getter(name="siteKey")
    def site_key(self) -> pulumi.Output[Optional[str]]:
        """
        The key for vpn-site that can be used for connections.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "site_key")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        Non-optional output: always set by the service.
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The VirtualWAN to which the vpnSite belongs.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "virtual_wan")
    @property
    @pulumi.getter(name="vpnSiteLinks")
    def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
        """
        List of all vpn site links.
        """
        # Output property; the value is resolved by the pulumi engine.
        return pulumi.get(self, "vpn_site_links")
|
tensorize_gains
|
Helper function to extract gains into fitting tensors.
Parameters
----------
uvcal: UVCal object
UVCal object holding gain data to tensorize.
polarization: str
pol-str of gain to extract.
time: float
JD of time to convert to tensor.
dtype: numpy.dtype
dtype of tensors to output.
Returns
-------
gains_re: tf.Tensor object.
tensor object holding real component of gains
for time_index and polarization
shape is Nant x Nfreq
gains_im: tf.Tensor object.
tensor object holding imag component of gains
for time_index and polarization
shape is Nant x Nfreq
|
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Mapping from optimizer name to the tf.keras optimizer class it refers to.
# Looked up by the `optimizer` string argument of fit_gains_and_foregrounds
# and instantiated there with **opt_kwargs.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """
    Order dict keys in order of number of baselines in each group

    chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
    same chunk have the same number of baselines in each group.

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
    use_redundancy: bool, optional
        If False, break fitting groups with the same number of baselines in each redundant
        sub_group into different fitting groups with no redundancy in each
        redundant subgroup. This is to prevent fitting groups with single
        redundant groups of varying lengths from being lumped into different chunks
        increasing the number of chunks has a more significant impact on run-time
        then increasing the number of baselines in each chunk.
        default is False.
    grp_size_threshold: int, optional
        when use_redundancy is False, only fitting groups with fewer than this
        many redundant sub-groups are broken apart into non-redundant groups.
        default is 5.

    Returns
    -------
    fg_model_comps_dict_chunked: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    """
    # work on a deep copy so the caller's dict is never mutated.
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # We can remove redundancies for fitting groups of baselines that have the same
        # number of elements in each redundant group. Iterate over a snapshot of the
        # keys since we pop/add entries while looping.
        for fit_grp in list(fg_model_comps_dict.keys()):
            rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
            # only break up groups with small numbers of group elements
            # whose redundant sub-groups all have the same length.
            if np.allclose(rlens, np.mean(rlens)) and len(rlens) < grp_size_threshold:
                # split each fitting group into one group per redundant position,
                # all sharing the same modeling vectors.
                modeling_vectors = fg_model_comps_dict.pop(fit_grp)
                for rednum in range(int(rlens[0])):
                    fit_grp_new = tuple((red_grp[rednum],) for red_grp in fit_grp)
                    fg_model_comps_dict[fit_grp_new] = modeling_vectors
    # bucket fitting groups by total baseline count and track the maximum
    # number of modeling vectors within each bucket.
    chunked_keys = {}
    maxvecs = {}
    for fit_grp, comps in fg_model_comps_dict.items():
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        chunked_keys.setdefault(nbl, []).append(fit_grp)
        maxvecs[nbl] = max(maxvecs.get(nbl, 0), comps.shape[1])
    return {
        (nbl, maxvecs[nbl]): {fit_grp: fg_model_comps_dict[fit_grp] for fit_grp in fit_grps}
        for nbl, fit_grps in chunked_keys.items()
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        forwarded to chunk_fg_comp_dict_by_nbls. default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        accepted for API symmetry; not used in this function body.
    verbose: bool, optional
        if True, emit progress messages through echo. default is False.
    grp_size_threshold: int, optional
        forwarded to chunk_fg_comp_dict_by_nbls. default is 5.

    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. For example, if we are modeling with
        vectors that all span nbls=1 baseline and using delay-modes to model our data
        then nvecs will equal the largest number of delay modes necessary to model the wedge
        on all baselines even though the short baselines are described by far fewer modes
        on short baselines, most of the rows along the vector dimension will therefor be zero.
        This is wasteful of memory but it allows us to take advantage of the fast
        dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
        # zero-initialized so groups with fewer than nvecs vectors are zero-padded.
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
            corr_inds_grp = []
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                nred = len(red_grp)  # NOTE(review): nred is computed but never used.
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    # copy this group's vectors into the padded tensor:
                    # rows beyond the group's own vector count stay zero.
                    vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
                    # frequency block of the rgrpnum-th redundant group in the raveled components.
                    compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                    dslice = slice(nbl * nfreqs, (nbl + 1) * nfreqs)  # NOTE(review): dslice is unused.
                    modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
                        compslice
                    ].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    # invert ants_map so we can translate tensor indices back to antenna numbers.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # dense (ant_i, ant_j, freq) grids; only entries listed in corr_inds are filled.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # _key2inds reports whether the baseline is stored as-is (dinds1)
                # or in conjugated order (dinds2).
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the blt row whose time matches `time` to within 1e-7 days.
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # weights derived from flags (and optionally nsamples).
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # weights supplied externally via a UVFlag object; the
                    # antenna pair may be stored in either order there.
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights so that they sum to unity over all fitted baselines.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather the dense grids into per-chunk (ngrps, nbls, nfreqs) tensors.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.

    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        JD of the time-slice to renormalize (matched to within 1e-7 days).
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.

    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # select the blt rows belonging to this time.
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # only use samples unflagged in both the deconvolved data and the reference model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # mask divide-by-zero / overflow results so nanmean ignores them.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    scale_factor_phase = np.angle(np.nanmean(data_ratio))  # NOTE(review): currently unused, see below.
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    scale_factor = scale_factor_abs # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # scale gains by scale_factor**-0.5 — presumably because each visibility
    # carries two gain factors (g_i g_j*); NOTE(review): confirm convention.
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
# MASKED: tensorize_gains function (lines 368-398)
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model on an antenna-pair grid.

    Parameters
    ----------
    nants: int
        number of antennas in the data being modeled.
    nfreqs: int
        number of frequency channels in the data being modeled.
    fg_model_comps: list
        list of tf.Tensor foreground modeling vectors,
        each with shape (nvecs, ngrps, nbls, nfreqs).
    fg_coeffs: list
        list of tf.Tensor foreground modeling coefficients,
        each with shape (nvecs, ngrps, 1, 1).
    corr_inds: list
        per-chunk list of per-group lists of (i, j) antenna-index 2-tuples
        giving where each modeled baseline lives in the output grid.

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs array holding the modeled visibility data.
    """
    model = np.zeros((nants, nants, nfreqs))
    for cnum in range(len(fg_model_comps)):
        # collapse the vector axis: coefficient-weighted sum of modeling vectors.
        chunk_model = tf.reduce_sum(fg_coeffs[cnum] * fg_model_comps[cnum], axis=0).numpy()
        ngrps = fg_model_comps[cnum].shape[1]
        for gnum in range(ngrps):
            for blnum, (i, j) in enumerate(corr_inds[cnum][gnum]):
                # scatter each group's baselines back onto the (i, j) grid.
                model[i, j] = chunk_model[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.

    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        precision used for the fit (reported in the startup message).
        default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        extra keyword arguments passed to tf.function when graph_mode is True.
        default is None -> {}.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.

    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    # sentinel larger than any realistic loss so the first recorded loss wins.
    min_loss = 9e99
    nants = g_r.shape[0]
    nfreqs = g_r.shape[1]
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    # (mirrors the chunk/group/baseline hierarchy of corr_inds).
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    # wrap fit parameters in tf.Variable so the GradientTape tracks them.
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        vars = [g_r, g_i] + fg_r + fg_i
    else:
        # frozen model: only the gains are trainable.
        vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # precompute the weighted sums of the sky model once; they are
        # constants of the optimization.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # one gradient-descent update over all trainable variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, vars)
        opt.apply_gradients(zip(grads, vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        # optional warm-up under the tensorflow profiler.
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()

    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            # converged: successive losses differ by less than tol.
            echo(
                f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            # model frozen: return the (untouched) input coefficients.
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Write fitted model tensors back into a UVData object (tensor mode).

    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0

    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    data_antpairs = uvdata.get_antpairs()
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for grp in red_grps:
        for ap in grp:
            row, col = ants_map[ap[0]], ants_map[ap[1]]
            # the data may store this pair conjugated; look up the stored
            # orientation and conjugate the model accordingly.
            conjugated = ap not in data_antpairs
            blt_inds = uvdata.antpair2ind(ap[::-1] if conjugated else ap)
            # narrow to the single blt index matching the requested time.
            time_hit = np.where(np.isclose(time, uvdata.time_array[blt_inds], atol=1e-7, rtol=0.0))[0][0]
            blt = blt_inds[time_hit]
            if conjugated:
                vis = model_r[row, col] - 1j * model_i[row, col]
            else:
                vis = model_r[row, col] + 1j * model_i[row, col]
            uvdata.data_array[blt, 0, :, polnum] = vis * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write tensorized gain solutions back into a UVCal object.

    Parameters
    ----------
    uvcal: UVCal object
        uvcal object to insert gain solutions into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    gains_re: dict with int keys and tf.Tensor object values
        dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
        representing the real component of the complex gain for antenna i.
    gains_im: dict with int keys and tf.Tensor object values
        dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
        representing the imag component of the complex gain for antenna j.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    jones_ind = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for antnum in range(uvcal.Nants_data):
        complex_gain = gains_re[antnum].numpy() + 1j * gains_im[antnum].numpy()
        uvcal.gain_array[antnum, 0, :, time_ind, jones_ind] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initial foreground coefficient guesses via per-group linear least squares.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict
        docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs: list
        list of tf.Tensor objects, one per chunk, each with shape
        (nvecs, ngrps, 1, 1), holding the least-squares coefficient guess
        for every modeling vector of every fitting group in the chunk.
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # binary mask: one where the weight is nonzero, zero elsewhere.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        grp_coeffs = []
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        for gnum in range(ngrps):
            # rows of the design matrix that are identically zero are padding;
            # assume padding sits at the bottom and solve only the active rows.
            zero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            nvecs_active = np.min(zero_rows) if len(zero_rows) > 0 else nvecs
            design = tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_active]
            rhs = tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1))
            solution = tf.reshape(tf.linalg.lstsq(design, rhs), (nvecs_active,))
            # pad zeros back up to nvecs so every group stacks to equal length.
            grp_coeffs.append(tf.pad(solution, [(0, nvecs - nvecs_active)]))
        # two trailing singleton axes satisfy downstream broadcasting rules.
        fg_coeffs.append(tf.reshape(tf.transpose(tf.stack(grp_coeffs)), (nvecs, ngrps, 1, 1)))
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelation to delay components with this width.
        default is 25.0.

    Returns
    -------
    data_weights: UVFlag object
        UFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            # autocorrelation baseline: fit a smooth model to each time.
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # evaluate the least-squares fit over all channels (including flagged ones).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    # weight ~ 1 / (auto_i * auto_j), i.e. inverse of the radiometric noise variance.
    for bl in bls:
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' sets sum of amps equal and sum of phases equal.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, reweight the data by the squared amplitude of the initial
        foreground model (times the original weights) before fitting.
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and are likely orders of
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    # work on a copy restricted to the selected baselines.
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        # NOTE(review): if sky_model is None AND model_regularization is None this
        # branch calls .select on None and raises — confirm callers never hit that combination.
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data sets the overall scale factor for tensorization.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                if first_time or not init_guesses_from_previous_time_step:
                    # (re-)initialize gains and foreground coefficients from scratch;
                    # otherwise the values carried over from the previous time step are reused.
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # reweight by |initial model|^2 so high-SNR channels dominate the fit.
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this "skipped!" marker is overwritten by
                # fit_history[polnum] = fit_history_p after the time loop — confirm intended.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # multiply the foreground model by the fitted gains so resid = data - g_i g_j* V_model.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag (and zero) a single time / polarization slice of a UVData or UVCal object in place."""
    if isinstance(data_object, UVData):
        blt_sel = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_ind = np.where(
            data_object.polarization_array
            == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        data_object.flag_array[blt_sel, :, :, pol_ind] = True
        data_object.data_array[blt_sel, :, :, pol_ind] = 0.0
    elif isinstance(data_object, UVCal):
        jones_ind = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_ind = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # flagged gains are reset to unity so downstream division is harmless.
        data_object.gain_array[:, 0, :, time_ind, jones_ind] = 1.0
        data_object.flag_array[:, 0, :, time_ind, jones_ind] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Joint gain / foreground solve using a mix of DPSS and simple_cov components.

    Baselines with no frequency redundancy are modeled with DPSS vectors while
    groups of baselines sharing some frequency redundancy are modeled jointly
    with simple_cov components.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes. unitless. default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models, in ns. default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range, in ns. default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element, in ns. default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting. default is False.
    verbose: bool, optional
        lots of text output. default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters). default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some frequency redundancy.
        When frequency redundancy exists, baselines will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines. default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter. default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components;
        if False, model each baseline within each redundant group with separate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        if True, baselines must match in angle to within angle_match_tol. default is True.
    angle_match_tol: float, optional
        angle tolerance used when require_exact_angle_match is True. default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics. default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        if not None, save model_comps_dict to this path with np.save. default is None.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # determine which baselines overlap in uv space and should be fit jointly.
    fit_grps, bl_vecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    if model_comps_dict is None:
        # derive the mixed DPSS / simple_cov modeling components for those groups.
        model_comps_dict = modeling.yield_mixed_comps(
            fit_grps,
            bl_vecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    return calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Joint gain / foreground solve with per-baseline DPSS modeling vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes. unitless. default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models, in ns. default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range, in ns. default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting. default is False.
    verbose: bool, optional
        lots of text output. default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters). default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output. default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    dpss_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
        uvdata,
        horizon=horizon,
        min_dly=min_dly,
        offset=offset,
        include_autos=include_autos,
        red_tol=red_tol,
        notebook_progressbar=notebook_progressbar,
        verbose=verbose,
    )
    return calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=dpss_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model: coefficient-weighted sum of components over axis 0."""
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Uncalibrated visibility model: per-baseline gain products applied to the foreground model."""
    g0_r = tf.gather(g_r, ant0_inds)
    g1_r = tf.gather(g_r, ant1_inds)
    g0_i = tf.gather(g_i, ant0_inds)
    g1_i = tf.gather(g_i, ant1_inds)
    vr, vi = fg_model(fg_r, fg_i, fg_comps)
    # expand g0 * conj(g1) * v into real / imaginary parts.
    rr = g0_r * g1_r
    ii = g0_i * g1_i
    ri = g0_r * g1_i
    ir = g0_i * g1_r
    model_r = (rr + ii) * vr + (ri - ir) * vi
    model_i = (ir - ri) * vr + (rr + ii) * vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real plus imaginary parts)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE between data and the calibration model, accumulated chunk by chunk."""
    chunk_losses = [tf.constant(0.0, dtype) for _ in range(nchunks)]
    for cnum in range(nchunks):
        mdl_r, mdl_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        chunk_losses[cnum] = chunk_losses[cnum] + mse(mdl_r, mdl_i, data_r[cnum], data_i[cnum], wgts[cnum])
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE plus a quadratic penalty tying the weighted model sums to priors."""
    chunk_losses = [tf.constant(0.0, dtype) for _ in range(nchunks)]
    sums_i = [tf.constant(0.0, dtype) for _ in range(nchunks)]
    sums_r = [tf.constant(0.0, dtype) for _ in range(nchunks)]
    for cnum in range(nchunks):
        mdl_r, mdl_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # weighted sums of the model's real / imag parts feed the regularization term.
        sums_r[cnum] = sums_r[cnum] + tf.reduce_sum(mdl_r * wgts[cnum])
        sums_i[cnum] = sums_i[cnum] + tf.reduce_sum(mdl_i * wgts[cnum])
        chunk_losses[cnum] = chunk_losses[cnum] + mse(mdl_r, mdl_i, data_r[cnum], data_i[cnum], wgts[cnum])
    penalty_r = tf.square(tf.reduce_sum(tf.stack(sums_r)) - prior_r_sum)
    penalty_i = tf.square(tf.reduce_sum(tf.stack(sums_i)) - prior_i_sum)
    return tf.reduce_sum(tf.stack(chunk_losses)) + penalty_r + penalty_i
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickle fitting info to.
        NOTE(review): currently accepted but never used in the body — confirm intended.
    x_orientation: str, optional
        x_orientation of feeds, written into output gains. default is "east".
    clobber: bool, optional
        overwrite existing output files. default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of int, optional
        antennas to exclude from calibration and modeling. default is None.
    select_ants: list of int, optional
        antennas to select exclusively for calibration and modeling. default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        float precision in bits, 32 or 64. default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwrags for calibration_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # accept a single path, a list of paths, or an already-loaded UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # NOTE(review): this selects on uvd (already selected above) rather than
        # uvd_model — looks like it should operate on uvd_model; confirm.
        utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Construct an argparser for input/output and data-selection arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser containing an "Input and Output Arguments." group with file paths,
        baseline-selection cuts, and GPU/precision options.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overall amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a starting point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUG FIX: default was the *string* "False", which is truthy, so args.clobber
    # evaluated True even when the flag was absent. store_true flags must default
    # to the boolean False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Construct an argparser with input/output plus general fitting arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser from input_output_parser() extended with a
        "General Fitting Arguments." group (optimizer, tolerances, weighting).
    """
    ap = input_output_parser()
    sp = ap.add_argument_group("General Fitting Arguments.")
    sp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    sp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    sp.add_argument("--verbose", default=False, action="store_true", help="lots of text outputs.")
    sp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for minimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    sp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # NOTE(review): with action="store_true" and default=True this flag can never
    # be switched off from the command line; a store_false flag (or
    # BooleanOptionalAction) may have been intended. Left as-is to preserve behavior.
    sp.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    sp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    sp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not recommended for GPUs.",
    )
    sp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    sp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    sp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more than this fraction of data is flagged.",
    )
    sp.add_argument("--model_regularization", type=str, default="post_hoc")
    sp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    sp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    sp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return ap
def dpss_fit_argparser():
    """Extend the general fitting argparser with DPSS-specific arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser from fitting_argparser() with a
        "DPSS Specific Fitting Arguments." group added.
    """
    parser = fitting_argparser()
    group = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    group.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
    group.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    group.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return parser
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract the gains for one time and polarization as real/imag tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of the gains at this time and polarization,
        shape Nant x Nfreq.
    gains_im: tf.Tensor object.
        imag component of the gains at this time and polarization,
        shape Nant x Nfreq.
    """
    # locate the requested polarization on the jones axis and the requested JD
    # on the time axis (atol=1e-7 is in days).
    jones_num = uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    polnum = np.where(uvcal.jones_array == jones_num)[0][0]
    gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    gain_slice = uvcal.gain_array[:, 0, :, gindt, polnum].squeeze()
    gains_re = tf.convert_to_tensor(gain_slice.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(gain_slice.imag, dtype=dtype)
    return gains_re, gains_im
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Map from optimizer name (the value accepted by the --optimizer CLI argument
# and the `optimizer` kwarg of fit_gains_and_foregrounds) to the corresponding
# tf.optimizers class; instantiated as OPTIMIZERS[name](**opt_kwargs).
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """
    Order dict keys in order of number of baselines in each group
    chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
    same chunk have the same number of baselines in each group.
    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
    use_redundancy: bool, optional
        If False, break fitting groups with the same number of baselines in each redundant
        sub_group into different fitting groups with no redundancy in each
        redundant subgroup. This is to prevent fitting groups with single
        redundant groups of varying lengths from being lumped into different chunks
        increasing the number of chunks has a more significant impact on run-time
        then increasing the number of baselines in each chunk.
        default is False.
    grp_size_threshold: int, optional
        when use_redundancy is False, only fitting groups whose redundant sub-groups
        all have the same length and which contain fewer than this many sub-groups
        are split into per-baseline fitting groups.
        default is 5.
    Returns:
    fg_model_comps_dict_chunked: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    """
    chunked_keys = {}
    maxvecs = {}
    # deep-copy so the caller's dict is not restructured in place below.
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # We can remove redundancies for fitting groups of baselines that have the same
        # number of elements in each redundant group.
        keys_with_redundancy = list(fg_model_comps_dict.keys())
        for fit_grp in keys_with_redundancy:
            # rlens[k] = number of baselines in the k-th redundant sub-group.
            rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
            # only break up groups with small numbers of group elements.
            if np.allclose(rlens, np.mean(rlens)) and len(rlens) < grp_size_threshold:
                # split up groups.
                modeling_vectors = fg_model_comps_dict.pop(fit_grp)
                for rednum in range(int(rlens[0])):
                    # the new fitting group takes the rednum-th baseline from each
                    # redundant sub-group; all splits share the same modeling vectors.
                    fit_grp_new = tuple([(red_grp[rednum],) for red_grp in fit_grp])
                    fg_model_comps_dict[fit_grp_new] = modeling_vectors
    for fit_grp in fg_model_comps_dict:
        # total number of baselines across all redundant sub-groups of this fitting group.
        nbl = 0
        for red_grp in fit_grp:
            for ap in red_grp:
                nbl += 1
        # bucket fitting groups by their baseline count and track the largest
        # number of modeling vectors seen in each bucket.
        if nbl in chunked_keys:
            chunked_keys[nbl].append(fit_grp)
            if fg_model_comps_dict[fit_grp].shape[1] > maxvecs[nbl]:
                maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
        else:
            chunked_keys[nbl] = [fit_grp]
            maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
    fg_model_comps_dict_chunked = {}
    for nbl in chunked_keys:
        fg_model_comps_dict_chunked[(nbl, maxvecs[nbl])] = {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
    return fg_model_comps_dict_chunked
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor
    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        forwarded to chunk_fg_comp_dict_by_nbls; see its docstring.
        default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        accepted for API consistency with sibling functions; not used here.
    verbose: bool, optional
        lots of text output.
        default is False.
    grp_size_threshold: int, optional
        forwarded to chunk_fg_comp_dict_by_nbls; see its docstring.
        default is 5.
    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. For example, if we are modeling with
        vectors that all span nbls=1 baseline and using delay-modes to model our data
        then nvecs will equal the largest number of delay modes necessary to model the wedge
        on all baselines even though the short baselines are described by far fewer modes
        on short baselines, most of the rows along the vector dimension will therefor be zero.
        This is wasteful of memory but it allows us to take advantage of the fast
        dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
        # zero-padded so every group in the chunk shares the same nvecs.
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
            corr_inds_grp = []
            # nbl counts baselines across all redundant sub-groups of this fitting group.
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                # CLEANUP: removed unused locals `nred` and `dslice` from the
                # original implementation; they were computed but never read.
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
                    # rows of the component matrix for this redundant sub-group.
                    compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                    modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
                        compslice
                    ].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor
    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32
    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    # invert ants_map so tensor indices can be mapped back to antenna numbers.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # dense (Nants, Nants, Nfreqs) staging arrays; gathered into chunks at the end.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # NOTE(review): _key2inds is a private UVData API; dinds1 holds rows
                # where the baseline is stored as-is, dinds2 rows where it is stored
                # conjugated — presumed from the branch below; confirm against pyuvdata.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the row matching the requested JD (atol in days).
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                # NOTE(review): `data` is a basic-indexing *view* into
                # uvdata.data_array, so this in-place division also rescales the
                # underlying UVData object — verify callers expect that side effect.
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # default weights: unflagged mask, optionally scaled by nsamples.
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # look the baseline up in the UVFlag weights object, trying the
                    # reversed antenna order if the pair is stored conjugated there.
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights to sum to unity.
    # NOTE(review): if every sample is flagged, wgtsum is 0.0 and this divides
    # by zero — presumably such times are skipped upstream; confirm.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather the dense arrays into the per-chunk (ngrps, nbls, nfreqs) layout.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.
    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        JD of the time to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.
    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # baseline-time rows matching the requested JD (atol in days).
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # only use samples unflagged in both the deconvolved data and the reference model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # mask out infs/NaNs (e.g. from zero-valued deconv samples) before averaging.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    # scale_factor_phase is computed but intentionally not applied yet -- see the
    # inline comment on scale_factor below.
    scale_factor_phase = np.angle(np.nanmean(data_ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    scale_factor = scale_factor_abs  # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # gains absorb the inverse square root so that g_i g_j* V_model stays consistent.
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Helper function to extract gains into fitting tensors.
    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.
    Returns
    -------
    gains_re: tf.Tensor object.
        tensor object holding real component of gains
        for time_index and polarization
        shape is Nant x Nfreq
    gains_im: tf.Tensor object.
        tensor object holding imag component of gains
        for time_index and polarization
        shape is Nant x Nfreq
    """
    # index of the requested polarization along the jones axis.
    polnum = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    # index of the requested JD along the time axis (atol in days).
    gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    gains_re = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().real, dtype=dtype)
    gains_im = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model from components and coefficients.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs model of the visibility data
    """
    model = np.zeros((nants, nants, nfreqs))
    # one pass per chunk; each chunk stores groups sharing a common nbls.
    for comps, coeffs, chunk_inds in zip(fg_model_comps, fg_coeffs, corr_inds):
        # collapse the vector axis: (nvecs, ngrps, nbls, nfreqs) -> (ngrps, nbls, nfreqs)
        chunk_model = tf.reduce_sum(coeffs * comps, axis=0).numpy()
        for gnum in range(comps.shape[1]):
            for blnum, (i, j) in enumerate(chunk_inds[gnum]):
                model[i, j] = chunk_model[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.
    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
            group
                baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        float precision passed through to the chunked MSE loss functions.
        default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        additional kwargs passed to tf.function when graph_mode is True.
        default is None -> {}.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    # promote fit parameters to tf.Variables so gradients flow to them.
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        opt_vars = [g_r, g_i] + fg_r + fg_i
    else:
        opt_vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # precompute weighted sums of the sky model for the "sum" regularizer.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # one gradient-descent update of all optimized variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, opt_vars)
        opt.apply_gradients(zip(grads, opt_vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    # BUG FIX: initialize the "optimum" parameter copies up front so they are
    # defined on every exit path. Previously, use_min=True combined with
    # freeze_model=True never assigned fg_r_opt / fg_i_opt, producing a
    # NameError at the return statement.
    g_r_opt = g_r.value()
    g_i_opt = g_i.value()
    if not freeze_model:
        fg_r_opt = [fgr.value() for fgr in fg_r]
        fg_i_opt = [fgi.value() for fgi in fg_i]
    else:
        fg_r_opt = fg_r
        fg_i_opt = fg_i
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            echo(
                f"Tolerance threshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Insert fitted tensor values back into uvdata object for tensor mode.
    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor that the tensorized data were divided by;
        the fitted model is multiplied by it on re-insertion.
        default is 1.0
    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    antpairs_data = uvdata.get_antpairs()
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            i, j = ants_map[ap[0]], ants_map[ap[1]]
            if ap in antpairs_data:
                # baseline stored in the same orientation: insert model as-is.
                dinds = uvdata.antpair2ind(ap)
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] + 1j * model_i[i, j]
            else:
                # baseline stored with reversed antenna order: insert the conjugate.
                dinds = uvdata.antpair2ind(ap[::-1])
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] - 1j * model_i[i, j]
            # undo the scaling applied when the data were tensorized.
            uvdata.data_array[dinds, 0, :, polnum] = model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write tensorized gain solutions back into a UVCal object in place.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object to write the gain solutions into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    gains_re: dict with int keys and tf.Tensor object values
        dictionary mapping antenna numbers to Nfreq 1d tf.Tensor objects
        representing the real component of the complex gain for each antenna.
    gains_im: dict with int keys and tf.Tensor object values
        dictionary mapping antenna numbers to Nfreq 1d tf.Tensor objects
        representing the imag component of the complex gain for each antenna.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    # locate the jones index of the requested polarization and the time index
    # of the requested JD (atol in days).
    jones_num = uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    polnum = np.where(uvcal.jones_array == jones_num)[0][0]
    gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for ant_index in range(uvcal.Nants_data):
        gain = gains_re[ant_index].numpy() + 1j * gains_im[ant_index].numpy()
        uvcal.gain_array[ant_index, 0, :, gindt, polnum] = gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Estimate initial foreground coefficients with per-group linear least squares.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data.
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects, each (nvecs, ngrps, nbls, nfreqs).
        see description in tensorize_fg_model_comps_dict docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    Returns
    -------
    fg_coeffs: list
        list of tf.Tensor objects, one per chunk, each with shape
        (nvecs, ngrps, 1, 1) holding least-squares coefficient guesses
        (trailing singleton axes satisfy downstream broadcasting rules).
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    nchunks = len(data)
    # 1 where the weight is nonzero, 0 where it is (numerically) zero.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    fg_coeffs = []
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        chunk_coeffs = []
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        for gnum in range(ngrps):
            comps_grp = fg_model_comps[cnum][:, gnum]
            # indices of modeling vectors that are identically zero for this
            # group (zero padding rows appended after the real vectors).
            zero_rows = np.where(
                np.all(np.isclose(comps_grp.numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            # assumes all-zero rows sit at the end — TODO confirm upstream.
            nvecs_used = np.min(zero_rows) if len(zero_rows) > 0 else nvecs
            # solve the (ndata x nvecs_used) linear least-squares problem.
            design = tf.transpose(tf.reshape(comps_grp, (nvecs, ndata)))[:, :nvecs_used]
            rhs = tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1))
            soln = tf.reshape(tf.linalg.lstsq(design, rhs), (nvecs_used,))
            # zero-pad the solution back up to the full number of vectors.
            chunk_coeffs.append(tf.pad(soln, [(0, nvecs - nvecs_used)]))
        # stack groups and add two dummy axes for broadcasting downstream.
        fg_coeffs.append(tf.reshape(tf.transpose(tf.stack(chunk_coeffs)), (nvecs, ngrps, 1, 1)))
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data
    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelation to delay components with this width.
        default is 25.0.
    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    # DPSS basis spanning delays out to +-delay_extent around zero delay.
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            # fit each integration's autocorr spectrum independently.
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # evaluate the smooth fit over ALL channels (including flagged).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    # radiometer-style: 1 / (auto_i * auto_j) per baseline (i, j), zeroed where flagged.
    for bl in bls:
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.
    Parameters
    ----------
    uvdata: UVData object
        uvdata objet of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        modeling groups with fewer elements than this are broken up.
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' sets sum of amps equal and sum of phases equal to the sky model
        after each time/pol fit.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, scale fitting weights by the squared amplitude of the
        initial foreground model (an SNR-like weighting).
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers
    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and may differ from the true gains by
        an overall degenerate factor (see model_regularization).
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    # work on a copy down-selected to the baselines being fit.
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    # NOTE(review): if sky_model is None AND model_regularization is None, the
    # else branch below calls .select on None — confirm callers always supply a
    # sky model when regularization is disabled.
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    # map antenna number -> tensor index.
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    # NOTE(review): polnum indexes the pol axis via enumerate(get_pols());
    # assumes get_pols() order matches the polarization_array axis — confirm.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data sets the overall scale for tensorization.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                # initialize gains / fg coeffs from scratch unless carrying over
                # the previous time step's solution.
                if first_time or not init_guesses_from_previous_time_step:
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # scale weights by |initial model|^2 (SNR-like weighting).
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this marker is overwritten by
                # `fit_history[polnum] = fit_history_p` after the time loop —
                # per-time skip information is lost; verify intent.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # model_with_gains = gains applied to model (inverse application).
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag (and zero) one time / polarization slice of a UVData or UVCal object, in place.

    Raises
    ------
    ValueError
        if data_object is neither a UVData nor a UVCal instance.
    """
    if isinstance(data_object, UVData):
        pol_ind = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        blt_mask = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        data_object.flag_array[blt_mask, :, :, pol_ind] = True
        data_object.data_array[blt_mask, :, :, pol_ind] = 0.0
    elif isinstance(data_object, UVCal):
        jones_ind = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_ind = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # reset gains to unity and mark the whole slice flagged.
        data_object.gain_array[:, 0, :, time_ind, jones_ind] = 1.0
        data_object.flag_array[:, 0, :, time_ind, jones_ind] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.
    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with sepearate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        if True, require baselines to match in angle exactly (within angle_match_tol)
        to be grouped together.
        default is True.
    angle_match_tol: float, optional
        tolerance for the exact angle match described above.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        path to save model_comps_dict to (via np.save).
        default is None -> don't save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.
    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # get fitting groups
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive modeling components unless precomputed ones were supplied.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        # NOTE(review): np.save pickles the dict and appends '.npy' unless the
        # path already ends in it — confirm downstream loaders expect this.
        np.save(save_dict_to, model_comps_dict)
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.
    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> derive DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.
    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # Bugfix: a user-supplied fg_model_comps_dict was previously ignored and
    # the DPSS components were always recomputed. Honor the precomputed dict
    # and only derive components when none are given.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model: sum coefficients against modeling vectors.

    Returns the (real, imag) model tensors obtained by contracting the
    coefficient tensors with fg_comps along the leading (vector) axis.
    """
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Apply per-antenna gains to the foreground model, in real arithmetic.

    Gathers gain components for each baseline's two antennas and combines
    them with the foreground model visibilities to produce the per-baseline
    (real, imag) model that is compared against the data.
    """
    vr, vi = fg_model(fg_r, fg_i, fg_comps)
    g0_r = tf.gather(g_r, ant0_inds)
    g1_r = tf.gather(g_r, ant1_inds)
    g0_i = tf.gather(g_i, ant0_inds)
    g1_i = tf.gather(g_i, ant1_inds)
    gg_rr = g0_r * g1_r
    gg_ii = g0_i * g1_i
    gg_ri = g0_r * g1_i
    gg_ir = g0_i * g1_r
    model_r = (gg_rr + gg_ii) * vr + (gg_ri - gg_ir) * vi
    model_i = (gg_ir - gg_ri) * vr + (gg_rr + gg_ii) * vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real + imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE loss accumulated over all data chunks.

    Evaluates data_model / mse per chunk and sums the per-chunk losses.
    `dtype` is retained for interface compatibility.
    """
    chunk_losses = []
    for cnum in range(nchunks):
        m_r, m_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        chunk_losses.append(mse(m_r, m_i, data_r[cnum], data_i[cnum], wgts[cnum]))
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE loss plus a sum-regularization penalty.

    In addition to the per-chunk MSE, penalizes the squared difference between
    the weighted sums of the model's real/imag parts and the supplied priors,
    tying the overall model amplitude/phase to the reference.
    `dtype` is retained for interface compatibility.
    """
    chunk_losses = []
    r_sums = []
    i_sums = []
    for cnum in range(nchunks):
        m_r, m_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # weighted sums of real and imag model parts for regularization.
        r_sums.append(tf.reduce_sum(m_r * wgts[cnum]))
        i_sums.append(tf.reduce_sum(m_i * wgts[cnum]))
        chunk_losses.append(mse(m_r, m_i, data_r[cnum], data_i[cnum], wgts[cnum]))
    return (
        tf.reduce_sum(tf.stack(chunk_losses))
        + tf.square(tf.reduce_sum(tf.stack(r_sums)) - prior_r_sum)
        + tf.square(tf.reduce_sum(tf.stack(i_sums)) - prior_i_sum)
    )
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.
    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickel fitting info to.
        (currently unused; fitting info is only returned.)
    x_orientation: str, optional
        x_orientation of feeds to set in any written gain file.
        default is "east".
    clobber: bool, optional
        overwrite existing output files.
        default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of int, optional
        antennas to exclude from calibration and modeling.
        default is None -> exclude none.
    select_ants: list of int, optional
        antennas to select exclusively for calibration and modeling.
        default is None -> keep all.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        float precision (32 or 64 bits) for gradient descent.
        default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwrags for calibration_and_model_dpss()
    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # read data (or accept an in-memory UVData object).
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    # read reference model (or accept an in-memory UVData object).
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # Bugfix: this previously re-selected `uvd` (a no-op second pass),
        # leaving the model with baselines the data no longer has. Apply the
        # same baseline cuts to the model instead.
        utils.select_baselines(uvd_model, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    # read starting gains (or accept an in-memory UVCal object).
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        # NOTE(review): deriving the device index from the last character of
        # the device name breaks for >=10 GPUs — confirm acceptable.
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argparser holding input/output file arguments for calibration scripts.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with an "Input and Output Arguments." group covering data/model/gain
        paths, output paths, baseline selection, GPU selection, and float precision.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overal amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a staring point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUGFIX: default was the string "False", which is truthy; use the boolean False
    # so the flag is actually off unless supplied.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calbration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Build an argparser with general gradient-descent fitting options.

    Extends the parser returned by input_output_parser() with a
    "General Fitting Arguments." group and returns the combined parser.

    Returns
    -------
    parser: argparse.ArgumentParser
        parser including both I/O arguments and fitting arguments.
    """
    parser = input_output_parser()
    grp = parser.add_argument_group("General Fitting Arguments.")
    grp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    grp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    grp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    grp.add_argument("--verbose", default=False, action="store_true", help="lots of text ouputs.")
    grp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    grp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    grp.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    grp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    grp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.",
    )
    grp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    grp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    grp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    grp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more then this fractionf of data is flagged.",
    )
    grp.add_argument("--model_regularization", type=str, default="post_hoc")
    grp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    grp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    grp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return parser
def dpss_fit_argparser():
    """Build an argparser for DPSS-based foreground modeling options.

    Extends the parser returned by fitting_argparser() with a
    "DPSS Specific Fitting Arguments." group.

    Returns
    -------
    parser: argparse.ArgumentParser
        parser including I/O, general fitting, and DPSS arguments.
    """
    parser = fitting_argparser()
    grp = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    grp.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
    grp.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    grp.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return parser
|
yield_fg_model_array
|
Compute tensor foreground model.
Parameters
----------
nants: int
number of antennas in data to model.
freqs: int
number of frequencies in data to model.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
fg_coeffs: list
list of fg modeling tf.Tensor objects
representing foreground modeling coefficients.
Each tensor is (nvecs, ngrps, 1, 1)
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
Returns
-------
model: tf.Tensor object
nants x nants x nfreqs model of the visibility data
|
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Map from user-facing optimizer name (the `optimizer` argument / --optimizer
# CLI flag) to the corresponding TensorFlow first-order optimizer class.
# The class is instantiated with **opt_kwargs in fit_gains_and_foregrounds.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """Group fitting groups into chunks keyed by their total baseline count.

    Fitting groups whose modeling vectors span the same number of baselines are
    collected into the same chunk so they can later be stacked into one dense
    tensor (see tensorize_fg_model_comps_dict).

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary whose keys are tuples of tuples of 2-tuples (three levels).
        The outer tuple is a 'modeling group'; each inner tuple is a 'redundant
        group'; each element of a redundant group is an antenna-pair 2-tuple.
        Each value is an (nred_grps * nfreqs, nvecs) numpy.ndarray of modeling
        components. The input dict is deep-copied and not modified.
    use_redundancy: bool, optional
        If False, fitting groups whose redundant sub-groups all contain the
        same number of baselines (and have fewer than grp_size_threshold
        sub-groups) are split so that every redundant sub-group has length one.
        This keeps the number of distinct chunks small, which matters more for
        run-time than the per-chunk baseline count. default is False.
    grp_size_threshold: int, optional
        only split fitting groups with fewer than this many redundant
        sub-groups. default is 5.

    Returns
    -------
    fg_model_comps_dict_chunked: dict
        dictionary keyed by 2-tuples (nbl, nvecs): total baselines per fitting
        group in the chunk and the max number of modeling vectors over the
        chunk. Each value is a dict mapping fitting groups to their
        (nred_grps * nfreqs, nvecs) component arrays.
    """
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # Break up small fitting groups whose redundant sub-groups all have the
        # same length, so groups of equal baseline count land in one chunk.
        for fit_grp in list(fg_model_comps_dict):
            grp_lens = [len(red_grp) for red_grp in fit_grp]
            uniform = np.allclose(grp_lens, np.mean(grp_lens))
            if uniform and len(grp_lens) < grp_size_threshold:
                vectors = fg_model_comps_dict.pop(fit_grp)
                for idx in range(int(grp_lens[0])):
                    split_grp = tuple((red_grp[idx],) for red_grp in fit_grp)
                    fg_model_comps_dict[split_grp] = vectors
    # Bucket fitting groups by their total baseline count and track the
    # largest vector count seen in each bucket.
    chunked_keys = {}
    maxvecs = {}
    for fit_grp, comps in fg_model_comps_dict.items():
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        nvec = comps.shape[1]
        if nbl not in chunked_keys:
            chunked_keys[nbl] = [fit_grp]
            maxvecs[nbl] = nvec
        else:
            chunked_keys[nbl].append(fit_grp)
            maxvecs[nbl] = max(maxvecs[nbl], nvec)
    return {
        (nbl, maxvecs[nbl]): {grp: fg_model_comps_dict[grp] for grp in grps}
        for nbl, grps in chunked_keys.items()
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor
    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        passed through to chunk_fg_comp_dict_by_nbls; see its docstring.
        default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        currently unused here; accepted for interface consistency.
    verbose: bool, optional
        lots of text output. default is False.
    grp_size_threshold: int, optional
        passed through to chunk_fg_comp_dict_by_nbls; see its docstring.
    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. For example, if we are modeling with
        vectors that all span nbls=1 baseline and using delay-modes to model our data
        then nvecs will equal the largest number of delay modes necessary to model the wedge
        on all baselines even though the short baselines are described by far fewer modes
        on short baselines, most of the rows along the vector dimension will therefor be zero.
        This is wasteful of memory but it allows us to take advantage of the fast
        dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components so all groups in a chunk span the same nbls.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        chunk_dict = fg_model_comps_dict[(nbls, nvecs)]
        ngrps = len(chunk_dict)
        # rows beyond a group's actual vector count remain zero (see docstring).
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(chunk_dict):
            corr_inds_grp = []
            nbl = 0
            # number of modeling vectors actually present for this fitting group
            # (loop-invariant; hoisted out of the baseline loop).
            vecslice = slice(0, chunk_dict[modeling_grp].shape[1])
            for rgrpnum, red_grp in enumerate(modeling_grp):
                # rows of the (nred_grps * nfreqs, nvecs) matrix for this redundant group.
                compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    # every baseline in a redundant group shares the same components.
                    modeling_matrix[vecslice, grpnum, nbl] = chunk_dict[modeling_grp][compslice].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor
    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32
    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    # invert ants_map so tensor indices (i, j) can be mapped back to antenna numbers.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # dense (Nants, Nants, Nfreqs) scratch arrays; only pairs listed in
    # corr_inds are ever filled, the rest stay zero.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # NOTE(review): relies on the private pyuvdata UVData._key2inds API;
                # confirm its return signature against the pinned pyuvdata version.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    # baseline is stored with this antenna ordering.
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    # baseline stored with reversed antenna order; conjugate on read.
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the single blt index matching `time` to within 1e-7 (JD days).
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # default weights: unflagged channels, optionally scaled by nsamples.
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # look up user-supplied weights for this antenna pair (either ordering).
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    # flagged channels are zeroed regardless of the supplied weights.
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights so they sum to unity over all fitted baselines/channels.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather each chunk's (ngrps, nbls) baseline entries into per-chunk tensors.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.
    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        Time (JD) to compute and apply the correction for; matched against
        the time_array values of uvdata_deconv and gains to within 1e-7 days.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.
    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # blt indices at the requested time.
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # only use samples unflagged in BOTH the deconvolved data and the reference model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # mask divide-by-zero / inf results so nanmean ignores them.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    scale_factor_phase = np.angle(np.nanmean(data_ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    # amplitude-only correction; the phase term is computed but deliberately unused.
    scale_factor = scale_factor_abs # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # gains absorb the inverse square root of the amplitude correction, so the
    # product g_i * conj(g_j) * deconv is unchanged by this renormalization.
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract the gains for one time and polarization into fitting tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor; matched to within 1e-7 days.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    gains_im: tf.Tensor object.
        imag component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    """
    jones_num = uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    pol_ind = np.where(uvcal.jones_array == jones_num)[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    gain_slice = uvcal.gain_array[:, 0, :, time_ind, pol_ind].squeeze()
    return (
        tf.convert_to_tensor(gain_slice.real, dtype=dtype),
        tf.convert_to_tensor(gain_slice.imag, dtype=dtype),
    )
# MASKED: yield_fg_model_array function (lines 401-443)
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.
    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        float precision used for log messages about the run; the tensors
        themselves carry their own dtype. default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        additional keyword arguments passed to tf.function when graph_mode
        is True. default is None -> {}.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    # promote gains (and, unless the model is frozen, the foreground coeffs)
    # to trainable tf.Variables; opt_vars is the list handed to the optimizer.
    # (renamed from `vars` to avoid shadowing the Python builtin.)
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        opt_vars = [g_r, g_i] + fg_r + fg_i
    else:
        opt_vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # precompute the weighted sums of the sky model used to anchor the
        # overall amplitude/phase degeneracy in the regularized loss.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )
    else:
        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )
    def train_step_code():
        # one gradient-descent update of all trainable variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, opt_vars)
        opt.apply_gradients(zip(grads, opt_vars))
        return loss
    if graph_mode:
        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()
    else:
        def train_step():
            return train_step_code()
    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    # first call also builds the graph when graph_mode is True.
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            echo(
                f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Write fitted model tensors back into a UVData object in place.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert; matched to within 1e-7 days.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0

    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    data_pairs = uvdata.get_antpairs()
    pol_ind = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            ai, aj = ants_map[ap[0]], ants_map[ap[1]]
            # find where this pair lives in the data; conjugate if it is
            # stored with the reversed antenna ordering.
            if ap in data_pairs:
                blt_inds = uvdata.antpair2ind(ap)
                conj = False
            else:
                blt_inds = uvdata.antpair2ind(ap[::-1])
                conj = True
            t_match = np.where(np.isclose(time, uvdata.time_array[blt_inds], atol=1e-7, rtol=0.0))[0][0]
            blt_ind = blt_inds[t_match]
            if conj:
                vis = model_r[ai, aj] - 1j * model_i[ai, aj]
            else:
                vis = model_r[ai, aj] + 1j * model_i[ai, aj]
            uvdata.data_array[blt_ind, 0, :, pol_ind] = vis * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write fitted gain tensors back into a UVCal object in place.

    Parameters
    ----------
    uvcal: UVCal object
        uvcal object to insert gain solutions into.
    time: float
        JD of time to insert; matched to within 1e-7 days.
    polarization: str
        polarization to insert.
    gains_re: per-antenna container of Nfreq 1d tf.Tensor objects
        real components of the complex gains, indexed by antenna index.
    gains_im: per-antenna container of Nfreq 1d tf.Tensor objects
        imag components of the complex gains, indexed by antenna index.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    jones_num = uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    pol_ind = np.where(uvcal.jones_array == jones_num)[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for ant in range(uvcal.Nants_data):
        real_part = gains_re[ant].numpy()
        imag_part = gains_im[ant].numpy()
        uvcal.gain_array[ant, 0, :, time_ind, pol_ind] = real_part + 1j * imag_part
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.
    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict
        docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    Returns
    -------
    fg_coeffs_re: tf.Tensor object
        1d tensor containing real parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    fg_coeffs_im: tf.Tensor object
        1d tensor containing imag parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # binary (0/1) masks derived from the weights: only which samples are
    # nonzero matters for the least-squares initial guess, not their magnitude.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        # set up linear leastsq
        fg_coeff_chunk = []
        ngrps = data[cnum].shape[0]
        # flatten baseline and frequency axes into a single data axis.
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        # pad with zeros
        for gnum in range(ngrps):
            # NOTE(review): despite the name, these are the indices of rows that
            # are entirely (near) zero. The code assumes zero-padded modeling
            # vectors are contiguous at the END of the vector axis, so the first
            # all-zero row marks the number of genuinely nonzero vectors.
            nonzero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            if len(nonzero_rows) > 0:
                nvecs_nonzero = np.min(nonzero_rows)
            else:
                nvecs_nonzero = nvecs
            # solve linear leastsq
            # design matrix: (ndata, nvecs_nonzero); rhs: masked data (ndata, 1).
            fg_coeff_chunk.append(
                tf.reshape(
                    tf.linalg.lstsq(
                        tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
                        tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
                    ),
                    (nvecs_nonzero,),
                )
            )
            # pad zeros at the end back up to nvecs.
            fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satify broadcasting rules.
        fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
        fg_coeffs.append(fg_coeff_chunk)
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data
    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelation to delay components with this width.
        default is 25.0.
    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    # DPSS basis centered at zero delay with half-width delay_extent, used to
    # smooth the autocorrelation spectra.
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            # True where data is unflagged.
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # least-squares DPSS fit to the unflagged channels, then evaluate
                # the smooth model over ALL channels (interpolates through flags).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    # radiometer-style inverse variance: 1 / (auto_i * auto_j), zeroed at flags.
    for bl in bls:
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.
    Parameters
    ----------
    uvdata: UVData object
        uvdata object of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group'. Visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        modeling groups with fewer elements then this threshold are split up
        into single baselines during tensorization.
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'
        'post_hoc' sets sum of amps equal and sum of phases equal.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, reweight the fit by the squared model amplitude times the
        original weights after the initial foreground coefficient guesses.
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers
    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and may differ from the true gains
        by an overall scale and phase (tied to sky_model only when model
        regularization is applied).
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    # work on a baseline-selected copy so the caller's uvdata is untouched.
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    # NOTE(review): if sky_model is None AND model_regularization is None, the
    # else-branch below calls .select on None and will raise AttributeError —
    # confirm callers always supply a sky_model when disabling regularization.
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    # map antenna numbers to contiguous tensor indices.
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            # select all baseline-times at this JD.
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of the unflagged data; used to normalize the tensors so the
                # optimizer works with O(1) numbers.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                # (re)initialize gains and fg coeffs, unless we are warm-starting
                # from the previous time step.
                if first_time or not init_guesses_from_previous_time_step:
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # reweight by |model|^2 and renormalize so total weight is unity.
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this replaces the entire per-pol history dict with
                # the string "skipped!" on any skipped time step (and is itself
                # overwritten after the time loop); possibly intended to be
                # fit_history_p[time_index] = "skipped!" — confirm.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # put the model into the uncalibrated (data) frame by applying the inverse gains.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag (and reset) a single time / polarization slot of a UVData or UVCal object in place.

    For UVData: flags the slot and zeroes its data. For UVCal: flags the slot
    and resets its gains to unity. Raises ValueError for any other type.
    """
    if isinstance(data_object, UVData):
        polnum = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        bltsel = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        data_object.flag_array[bltsel, :, :, polnum] = True
        data_object.data_array[bltsel, :, :, polnum] = 0.0
    elif isinstance(data_object, UVCal):
        gindt = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        polnum = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        data_object.gain_array[:, 0, :, gindt, polnum] = 1.0
        data_object.flag_array[:, 0, :, gindt, polnum] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Jointly solve for gains and foregrounds with mixed modeling components.

    Baselines without frequency redundancy are modeled with DPSS vectors while
    groups of baselines sharing some frequency redundancy are modeled jointly
    with simple_cov components.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes (unitless).
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models, in ns. default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range, in ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element, in ns. default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting. default is False.
    verbose: bool, optional
        lots of text output. default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters). default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some frequency
        redundancy; such baselines are modeled jointly.
    n_angle_bins: int, optional
        number of angular bins between -pi and pi used to compare baselines.
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar. default is False.
    use_redundancy: bool, optional
        if True, model all baselines within each redundant group with the same
        components; otherwise model each baseline separately. default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        use tensorflow to derive multi-baseline modeling components
        (recommended on a GPU with enough memory for the spectral decomposition).
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type for deriving modeling components; default np.float64
        (cov-mat-like calculations need the extra precision).
    require_exact_angle_match: bool, optional
        require exact angular alignment when grouping baselines.
    angle_match_tol: float, optional
        tolerance for the angular match. default is 1e-3.
    grp_size_threshold: int, optional
        groups with fewer elements than this are split into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        precomputed mapping of fitting groups to numpy.ndarray
        (see modeling.yield_mixed_comps). default is None -> compute here.
    save_dict_to: str, optional
        if given, save model_comps_dict to this path. default is None.
    fitting_kwargs: kwarg dict
        additional kwargs forwarded to calibrate_and_model_tensor
        (see its docstring).

    Returns
    -------
    model: UVData object
        uvdata object containing model of the intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains
        and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary of per-time / per-polarization fit history with fields:
        'loss_history': loss-function values for each minimization iteration.
    """
    # group baselines that share frequency redundancy so they can be modeled jointly.
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive modeling components unless precomputed ones were supplied.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    # optionally persist the (potentially expensive) components to disk.
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    # hand off to the generic tensor-based solver.
    return calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> derive DPSS components from uvdata here.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # Bug fix: previously a supplied fg_model_comps_dict was silently ignored
    # and the DPSS components were always recomputed. Honor the documented
    # behavior: only derive components when none were provided.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model by summing coefficient-weighted components.

    Returns the (real, imag) model tensors obtained by contracting the
    coefficient tensors against the modeling components along axis 0.
    """
    real_part = tf.reduce_sum(fg_r * fg_comps, axis=0)
    imag_part = tf.reduce_sum(fg_i * fg_comps, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute model visibilities with gains applied.

    Evaluates the foreground model and multiplies it by g_0 * conj(g_1) for
    each baseline, returning separate real and imaginary tensors.
    """
    # foreground model visibilities (real / imag).
    vr, vi = fg_model(fg_r, fg_i, fg_comps)
    # per-baseline gain products for each antenna pair.
    rr = tf.gather(g_r, ant0_inds) * tf.gather(g_r, ant1_inds)
    ii = tf.gather(g_i, ant0_inds) * tf.gather(g_i, ant1_inds)
    ri = tf.gather(g_r, ant0_inds) * tf.gather(g_i, ant1_inds)
    ir = tf.gather(g_i, ant0_inds) * tf.gather(g_r, ant1_inds)
    # real / imag parts of (g0 * conj(g1)) * (vr + i vi).
    model_r = (rr + ii) * vr + (ri - ir) * vi
    model_i = (ir - ri) * vr + (rr + ii) * vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real plus imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE loss accumulated over all data chunks.

    Evaluates the gain-applied foreground model for each chunk and sums the
    weighted squared residuals against the matching data chunk.
    """
    cal_loss = [tf.constant(0.0, dtype) for _ in range(nchunks)]
    # accumulate the misfit chunk by chunk.
    for chunk in range(nchunks):
        mdl_r, mdl_i = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        cal_loss[chunk] += mse(mdl_r, mdl_i, data_r[chunk], data_i[chunk], wgts[chunk])
    return tf.reduce_sum(tf.stack(cal_loss))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted-MSE loss plus a 'sum' regularization penalty.

    Like mse_chunked, but adds squared penalties tying the weighted sums of the
    model's real and imaginary parts to the prior sums (prior_r_sum,
    prior_i_sum), which pins the overall amplitude/phase degeneracy of the fit.

    Returns a scalar tf.Tensor: sum of per-chunk weighted squared residuals
    plus the two regularization terms.
    """
    cal_loss = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
    model_i_sum = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
    model_r_sum = [tf.constant(0.0, dtype) for cnum in range(nchunks)]
    # now deal with dense components
    for cnum in range(nchunks):
        model_r, model_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # compute sum of real and imag parts x weights for regularization.
        model_r_sum[cnum] += tf.reduce_sum(model_r * wgts[cnum])
        model_i_sum[cnum] += tf.reduce_sum(model_i * wgts[cnum])
        cal_loss[cnum] += mse(model_r, model_i, data_r[cnum], data_i[cnum], wgts[cnum])
    return (
        tf.reduce_sum(tf.stack(cal_loss))
        + tf.square(tf.reduce_sum(tf.stack(model_r_sum)) - prior_r_sum)
        + tf.square(tf.reduce_sum(tf.stack(model_i_sum)) - prior_i_sum)
    )
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.
    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        path to pickle fitting info to.
        NOTE: currently unused — fit info is returned but never written to disk.
    x_orientation: str, optional
        x_orientation of the feeds to set in the output gains.
        default is "east".
    clobber: bool, optional
        overwrite existing output files.
        default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of ints, optional
        antennas to exclude from calibration and modeling.
        default is None.
    select_ants: list of ints, optional
        antennas to select exclusively for calibration and modeling.
        default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        float precision (32 or 64 bits) used for the tensorflow solve.
        default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwargs for calibration_and_model_dpss()
    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # accept a single path, a list of paths, or an already-loaded UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    # restrict to the requested baseline-length range and antenna selection.
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        # NOTE(review): the device index is parsed from the last character of the
        # device name, which breaks for GPU indices > 9 — confirm acceptable.
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    # write out whichever outputs were requested.
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argparse.ArgumentParser with input/output and selection arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with an "Input and Output Arguments." group covering input
        data/model/gain paths, output file names, baseline/antenna selection,
        and GPU / precision options.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overal amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a staring point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUGFIX: default was the *string* "False", which is truthy, so args.clobber
    # evaluated True even when --clobber was not supplied. Use the boolean False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calbration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Build the argument parser for general gradient-descent fitting options.

    Starts from ``input_output_parser`` and appends a "General Fitting
    Arguments." group controlling the optimizer, convergence criteria,
    redundancy handling, and MSE weighting choices.

    Returns
    -------
    parser: argparse.ArgumentParser
        parser including both I/O arguments and general fitting arguments.
    """
    parser = input_output_parser()
    fit_grp = parser.add_argument_group("General Fitting Arguments.")
    # Optimizer and convergence controls.
    fit_grp.add_argument("--tol", type=float, default=1e-14, help="Stop gradient descent after cost function converges to within this value.")
    fit_grp.add_argument("--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent.")
    fit_grp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    fit_grp.add_argument("--verbose", action="store_true", default=False, help="lots of text ouputs.")
    fit_grp.add_argument("--use_min", action="store_true", default=False, help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.")
    fit_grp.add_argument("--use_redundancy", action="store_true", default=False, help="Model redundant visibilities with the same set of foreground parameters.")
    # NOTE(review): with default=True and action="store_true" this flag can
    # never be turned off from the command line — confirm that is intended.
    fit_grp.add_argument("--correct_model", action="store_true", default=True, help="Remove gain effects from foreground model.")
    fit_grp.add_argument("--correct_resid", action="store_true", default=False, help="Apply fitted gains to the fitted residuals.")
    fit_grp.add_argument("--graph_mode", action="store_true", default=False, help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.")
    fit_grp.add_argument("--init_guesses_from_previous_time_step", action="store_true", default=False, help="initialize gain and foreground guesses from previous time step when calibrating multiple times.")
    fit_grp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    fit_grp.add_argument("--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters].")
    fit_grp.add_argument("--skip_threshold", type=float, default=0.5, help="Skip and flag time/polarization if more then this fractionf of data is flagged.")
    fit_grp.add_argument("--model_regularization", type=str, default="post_hoc")
    # MSE weighting options.
    fit_grp.add_argument("--nsamples_in_weights", action="store_true", default=False, help="Weight contributions to MSE by nsamples.")
    fit_grp.add_argument("--use_model_snr_weights", action="store_true", default=False, help="If True, weight contributions to MSE as proportional to SNR.")
    fit_grp.add_argument("--use_autocorrs_in_weights", action="store_true", default=False, help="If True, use autocorrelations to derive relative SNR weights.")
    return parser
def dpss_fit_argparser():
    """Build the argument parser for DPSS-based calibration fitting.

    Extends ``fitting_argparser`` with a "DPSS Specific Fitting Arguments."
    group controlling the delay extent of the DPSS modeling modes.

    Returns
    -------
    parser: argparse.ArgumentParser
        parser including I/O, general fitting, and DPSS-specific arguments.
    """
    parser = fitting_argparser()
    dpss_grp = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    dpss_grp.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
    dpss_grp.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    dpss_grp.add_argument("--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes.")
    return parser
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model on an antenna-pair grid.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    Returns
    -------
    model: np.ndarray
        (nants, nants, nfreqs) array with the modeled visibility data
        scattered into (ant_i, ant_j, freq) positions.
    """
    model = np.zeros((nants, nants, nfreqs))
    # Each chunk contributes a coefficient-weighted sum over its modeling
    # vectors; the resulting per-(group, baseline) spectra are scattered
    # into the antenna-pair grid using corr_inds.
    for cidx in range(len(fg_model_comps)):
        chunk_model = tf.reduce_sum(fg_coeffs[cidx] * fg_model_comps[cidx], axis=0).numpy()
        for grp_num in range(fg_model_comps[cidx].shape[1]):
            for bl_num, (ant_i, ant_j) in enumerate(corr_inds[cidx][grp_num]):
                model[ant_i, ant_j] = chunk_model[grp_num, bl_num]
    return model
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
OPTIMIZERS = {
"Adadelta": tf.optimizers.Adadelta,
"Adam": tf.optimizers.Adam,
"Adamax": tf.optimizers.Adamax,
"Ftrl": tf.optimizers.Ftrl,
"Nadam": tf.optimizers.Nadam,
"SGD": tf.optimizers.SGD,
"RMSprop": tf.optimizers.RMSprop,
"Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
"""
Order dict keys in order of number of baselines in each group
chunk fit_groups in fg_model_comps_dict into chunks where all groups in the
same chunk have the same number of baselines in each group.
Parameters
----------
fg_model_comps_dict: dict
dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
in the first level, each tuple represents a 'modeling group' visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will represent with identical component coefficients
each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
use_redundancy: bool, optional
If False, break fitting groups with the same number of baselines in each redundant
sub_group into different fitting groups with no redundancy in each
redundant subgroup. This is to prevent fitting groups with single
redundant groups of varying lengths from being lumped into different chunks
increasing the number of chunks has a more significant impact on run-time
then increasing the number of baselines in each chunk.
default is False.
Returns:
fg_model_comps_dict_chunked: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk.
"""
chunked_keys = {}
maxvecs = {}
fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
if not use_redundancy:
# We can remove redundancies for fitting groups of baselines that have the same
# number of elements in each redundant group.
keys_with_redundancy = list(fg_model_comps_dict.keys())
for fit_grp in keys_with_redundancy:
rlens = np.asarray([len(red_grp) for red_grp in fit_grp])
# only break up groups with small numbers of group elements.
if np.allclose(rlens, np.mean(rlens)) and len(rlens) < grp_size_threshold:
# split up groups.
modeling_vectors = fg_model_comps_dict.pop(fit_grp)
for rednum in range(int(rlens[0])):
fit_grp_new = tuple([(red_grp[rednum],) for red_grp in fit_grp])
fg_model_comps_dict[fit_grp_new] = modeling_vectors
for fit_grp in fg_model_comps_dict:
nbl = 0
for red_grp in fit_grp:
for ap in red_grp:
nbl += 1
if nbl in chunked_keys:
chunked_keys[nbl].append(fit_grp)
if fg_model_comps_dict[fit_grp].shape[1] > maxvecs[nbl]:
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
else:
chunked_keys[nbl] = [fit_grp]
maxvecs[nbl] = fg_model_comps_dict[fit_grp].shape[1]
fg_model_comps_dict_chunked = {}
for nbl in chunked_keys:
fg_model_comps_dict_chunked[(nbl, maxvecs[nbl])] = {k: fg_model_comps_dict[k] for k in chunked_keys[nbl]}
return fg_model_comps_dict_chunked
def tensorize_fg_model_comps_dict(
fg_model_comps_dict,
ants_map,
nfreqs,
use_redundancy=False,
dtype=np.float32,
notebook_progressbar=False,
verbose=False,
grp_size_threshold=5,
):
"""Convert per-baseline model components into a Ndata x Ncomponent tensor
Parameters
----------
fg_model_comps_dict: dict
dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
of baselines in each vector and the number of vectors. Each 2-tuple points to
a dictionary where each key is the fitting group in fg_comps_dict that includes
nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
numpy.ndarray describing the modeling components for each fitting group in the chunk.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
nfreqs: int, optional
number of frequency channels
dtype: numpy.dtype
tensor data types
default is np.float32
Returns
-------
fg_model_comps: list
list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
to be the maximum number of vectors representing any of the ngrps baseline grps
which means that many rows in nvecs will be zero. For example, if we are modeling with
vectors that all span nbls=1 baseline and using delay-modes to model our data
then nvecs will equal the largest number of delay modes necessary to model the wedge
on all baselines even though the short baselines are described by far fewer modes
on short baselines, most of the rows along the vector dimension will therefor be zero.
This is wasteful of memory but it allows us to take advantage of the fast
dense matrix operations on a GPU.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
"""
echo(
f"{datetime.datetime.now()} Computing foreground components matrices...\n",
verbose=verbose,
)
# chunk foreground components.
fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
)
fg_model_comps = []
corr_inds = []
for nbls, nvecs in fg_model_comps_dict:
ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
corr_inds_chunk = []
for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
corr_inds_grp = []
nbl = 0
for rgrpnum, red_grp in enumerate(modeling_grp):
nred = len(red_grp)
for ap in red_grp:
i, j = ants_map[ap[0]], ants_map[ap[1]]
corr_inds_grp.append((i, j))
vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
dslice = slice(nbl * nfreqs, (nbl + 1) * nfreqs)
modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
compslice
].T
nbl += 1
corr_inds_chunk.append(corr_inds_grp)
fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
corr_inds.append(corr_inds_chunk)
return fg_model_comps, corr_inds
def tensorize_data(
uvdata,
corr_inds,
ants_map,
polarization,
time,
data_scale_factor=1.0,
weights=None,
nsamples_in_weights=False,
dtype=np.float32,
):
"""Convert data in uvdata object to a tensor
Parameters
----------
uvdata: UVData object
UVData object containing data, flags, and nsamples to tensorize.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
polarization: str
pol-str of gain to extract.
time: float
time of data to convert to tensor.
data_scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
weights: UVFlag object, optional
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is False.
dtype: numpy.dtype
data-type to store in tensor.
default is np.float32
Returns
-------
data_r: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the real components of the baselines specified by these 2-tuples.
data_i: list of tf.Tensor objects
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the imag components of the baselines specified by these 2-tuples.
wgts: tf.Tensor object
list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
where ngrps, nbls are the dimensions of each sublist in corr_inds
and contain the weights of the baselines specified by these 2-tuples.
"""
ants_map_inv = {ants_map[i]: i for i in ants_map}
dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
data_r = np.zeros(dshape, dtype=dtype)
data_i = np.zeros_like(data_r)
wgts = np.zeros_like(data_r)
wgtsum = 0.0
for chunk in corr_inds:
for fitgrp in chunk:
for (i, j) in fitgrp:
ap = ants_map_inv[i], ants_map_inv[j]
bl = ap + (polarization,)
dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
if len(dinds1) > 0:
dinds = dinds1
conjugate = False
pol_ind = pol_ind[0]
else:
dinds = dinds2
conjugate = True
pol_ind = pol_ind[1]
dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
data /= data_scale_factor
if conjugate:
data = np.conj(data)
data_r[i, j] = data.real.astype(dtype)
data_i[i, j] = data.imag.astype(dtype)
if weights is None:
wgts[i, j] = iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
else:
if ap in weights.get_antpairs():
dinds = weights.antpair2ind(*ap)
else:
dinds = weights.antpair2ind(*ap[::-1])
dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
polnum = np.where(
weights.polarization_array
== uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
)[0][0]
wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
if nsamples_in_weights:
wgts[i, j] *= nsamples
wgtsum += np.sum(wgts[i, j])
data_r = tf.convert_to_tensor(data_r, dtype=dtype)
data_i = tf.convert_to_tensor(data_i, dtype=dtype)
wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
nchunks = len(corr_inds)
data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
"""Remove arbitrary phase and amplitude from deconvolved model and gains.
Parameters
----------
uvdata_reference_model: UVData object
Reference model for "true" visibilities.
uvdata_deconv: UVData object
"Deconvolved" data solved for in self-cal loop.
gains: UVCal object
Gains solved for in self-cal loop.
polarization: str
Polarization string to compute phase and amplitude correction for.
additional_flags: np.ndarray
Any additional flags you wish to use for excluding data from normalization
fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
default is None -> Only exclude data in flags from reference model and deconv from
determinging normalization.
Returns
-------
N/A: Modifies uvdata_deconv and gains in-place.
"""
# compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
polnum_data = np.where(
uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
)[0][0]
bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
selection = (
~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
& ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
)
if additional_flags is not None:
selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
data_ratio = (
uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
/ uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
)
data_ratio[~np.isfinite(data_ratio)] = np.nan
scale_factor_phase = np.angle(np.nanmean(data_ratio))
scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
scale_factor = scale_factor_abs # * np.exp(1j * scale_factor_phase) Need to figure this out later.
uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
polnum_gains = np.where(
gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
)[0][0]
gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
"""Helper function to extract gains into fitting tensors.
Parameters
----------
uvcal: UVCal object
UVCal object holding gain data to tensorize.
polarization: str
pol-str of gain to extract.
time: float
JD of time to convert to tensor.
dtype: numpy.dtype
dtype of tensors to output.
Returns
-------
gains_re: tf.Tensor object.
tensor object holding real component of gains
for time_index and polarization
shape is Nant x Nfreq
gains_im: tf.Tensor object.
tensor object holding imag component of gains
for time_index and polarization
shape is Nant x Nfreq
"""
polnum = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
gains_re = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().real, dtype=dtype)
gains_im = tf.convert_to_tensor(uvcal.gain_array[:, 0, :, gindt, polnum].squeeze().imag, dtype=dtype)
return gains_re, gains_im
def yield_fg_model_array(
nants,
nfreqs,
fg_model_comps,
fg_coeffs,
corr_inds,
):
"""Compute tensor foreground model.
Parameters
----------
nants: int
number of antennas in data to model.
freqs: int
number of frequencies in data to model.
fg_model_comps: list
list of fg modeling tf.Tensor objects
representing foreground modeling vectors.
Each tensor is (nvecs, ngrps, nbls, nfreqs)
fg_coeffs: list
list of fg modeling tf.Tensor objects
representing foreground modeling coefficients.
Each tensor is (nvecs, ngrps, 1, 1)
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
Returns
-------
model: tf.Tensor object
nants x nants x nfreqs model of the visibility data
"""
model = np.zeros((nants, nants, nfreqs))
nchunks = len(fg_model_comps)
for cnum in range(nchunks):
ngrps = fg_model_comps[cnum].shape[1]
gchunk = tf.reduce_sum(fg_coeffs[cnum] * fg_model_comps[cnum], axis=0).numpy()
for gnum in range(ngrps):
for blnum, (i, j) in enumerate(corr_inds[cnum][gnum]):
model[i, j] = gchunk[gnum, blnum]
return model
def fit_gains_and_foregrounds(
g_r,
g_i,
fg_r,
fg_i,
data_r,
data_i,
wgts,
fg_comps,
corr_inds,
use_min=False,
tol=1e-14,
maxsteps=10000,
optimizer="Adamax",
freeze_model=False,
verbose=False,
notebook_progressbar=False,
dtype=np.float32,
graph_mode=False,
n_profile_steps=0,
profile_log_dir="./logdir",
sky_model_r=None,
sky_model_i=None,
model_regularization=None,
graph_args_dict=None,
**opt_kwargs,
):
"""Run optimization loop to fit gains and foreground components.
Parameters
----------
g_r: tf.Tensor object.
tf.Tensor object holding real parts of gains.
g_i: tf.Tensor object.
tf.Tensor object holding imag parts of gains.
fg_r: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding foreground coeffs.
fg_i: list
list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
tf.Tensor object holding imag coeffs.
data_r: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
real part of data to fit.
data_i: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
imag part of data to fit.
wgts: list
list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
fg_comps: list:
list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
represents vectors to be used in modeling visibilities.
corr_inds: list
list of list of lists of 2-tuples. Hierarchy of lists is
chunk
group
baseline - (int 2-tuple)
use_min: bool, optional
if True, use the value that minimizes the loss function
regardless of where optimization loop ended up
(prevents overshooting due to excess momentum)
tol: float, optional
halt optimization loop once the loss changes by less then this value.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
verbose: bool, optional
lots of text output
default is False.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
sky_model_r: list of tf.Tensor objects, optional
chunked tensors containing model in same format as data_r
sky_model_i: list of tf.Tensor objects, optional
chunked tensors containing model in the same format as data_i
model_regularization: str, optional
type of model regularization to perform. Currently support "sum"
where the sums of real and imaginary parts (across all bls and freqs)
are constrained to be the same as the sum of real and imag parts
of data.
opt_kwargs: kwarg dict
additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
Returns
-------
g_r_opt: tf.Tensor object
real part of optimized gains.
g_i_opt: tf.Tensor object
imag part of optimized gains.
fg_r_opt: tf.Tensor object
real part of foreground coeffs.
fg_i_opt: tf.Tensor object.
imag part of optimized foreground coeffs.
fit_history: dict
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
if graph_args_dict is None:
graph_args_dict = {}
# initialize the optimizer.
echo(f"Using {str(dtype)} precision.")
echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
for k in opt_kwargs:
echo(f"{k}: {opt_kwargs[k]}")
opt = OPTIMIZERS[optimizer](**opt_kwargs)
# set up history recording
fit_history = {"loss": []}
min_loss = 9e99
nants = g_r.shape[0]
nfreqs = g_r.shape[1]
ant0_inds = []
ant1_inds = []
nchunks = len(fg_comps)
# build up list of lists of ant0 and ant1 for gather ops
for cnum in range(nchunks):
ant0_chunk = []
ant1_chunk = []
ngrps = len(corr_inds[cnum])
for gnum in range(ngrps):
ant0_grp = []
ant1_grp = []
for cpair in corr_inds[cnum][gnum]:
ant0_grp.append(cpair[0])
ant1_grp.append(cpair[1])
ant0_chunk.append(ant0_grp)
ant1_chunk.append(ant1_grp)
ant0_inds.append(ant0_chunk)
ant1_inds.append(ant1_chunk)
g_r = tf.Variable(g_r)
g_i = tf.Variable(g_i)
if not freeze_model:
fg_r = [tf.Variable(fgr) for fgr in fg_r]
fg_i = [tf.Variable(fgi) for fgi in fg_i]
vars = [g_r, g_i] + fg_r + fg_i
else:
vars = [g_r, g_i]
echo(
f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
verbose=verbose,
)
if not freeze_model:
echo(
f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
verbose=verbose,
)
echo(
f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
verbose=verbose,
)
if model_regularization == "sum":
prior_r_sum = tf.reduce_sum(
tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
)
prior_i_sum = tf.reduce_sum(
tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
)
def loss_function():
return mse_chunked_sum_regularized(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
fg_comps=fg_comps,
nchunks=nchunks,
data_r=data_r,
data_i=data_i,
wgts=wgts,
ant0_inds=ant0_inds,
ant1_inds=ant1_inds,
dtype=dtype,
prior_r_sum=prior_r_sum,
prior_i_sum=prior_i_sum,
)
else:
def loss_function():
return mse_chunked(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
fg_comps=fg_comps,
nchunks=nchunks,
data_r=data_r,
data_i=data_i,
wgts=wgts,
ant0_inds=ant0_inds,
ant1_inds=ant1_inds,
dtype=dtype,
)
def train_step_code():
with tf.GradientTape() as tape:
loss = loss_function()
grads = tape.gradient(loss, vars)
opt.apply_gradients(zip(grads, vars))
return loss
if graph_mode:
@tf.function(**graph_args_dict)
def train_step():
return train_step_code()
else:
def train_step():
return train_step_code()
if n_profile_steps > 0:
echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
tf.profiler.experimental.start(profile_log_dir)
for step in PBARS[notebook_progressbar](range(n_profile_steps)):
with tf.profiler.experimental.Trace("train", step_num=step):
train_step()
tf.profiler.experimental.stop()
echo(
f"{datetime.datetime.now()} Building Computational Graph...\n",
verbose=verbose,
)
loss = train_step()
echo(
f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
verbose=verbose,
)
for step in PBARS[notebook_progressbar](range(maxsteps)):
loss = train_step()
fit_history["loss"].append(loss.numpy())
if use_min and fit_history["loss"][-1] < min_loss:
# store the g_r, g_i, fg_r, fg_i values that minimize loss
# in case of overshoot.
min_loss = fit_history["loss"][-1]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if not freeze_model:
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
echo(
f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
verbose=verbose,
)
break
# if we dont use use_min, then the last
# visited set of parameters will be used
# to set the ML params.
if not use_min:
min_loss = fit_history["loss"][-1]
g_r_opt = g_r.value()
g_i_opt = g_i.value()
if not freeze_model:
fg_r_opt = [fgr.value() for fgr in fg_r]
fg_i_opt = [fgi.value() for fgi in fg_i]
else:
fg_r_opt = fg_r
fg_i_opt = fg_i
echo(
f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
verbose=verbose,
)
return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
uvdata,
time,
polarization,
ants_map,
red_grps,
model_r,
model_i,
scale_factor=1.0,
):
"""Insert fitted tensor values back into uvdata object for tensor mode.
Parameters
----------
uvdata: UVData object
uvdata object to insert model data into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
ants_map: dict mapping integers to integers
map between each antenna number to a unique index between 0 and Nants_data
(typically the index of each antenna in ants_map)
red_grps: list of lists of int 2-tuples
a list of lists of 2-tuples where all antenna pairs within each sublist
are redundant with eachother. Assumes that conjugates are correctly taken.
model_r: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
model_i: np.ndarray
an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
scale_factor: float, optional
overall scaling factor to divide tensorized data by.
default is 1.0
Returns
-------
N/A: Modifies uvdata inplace.
"""
antpairs_data = uvdata.get_antpairs()
polnum = np.where(
uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
)[0][0]
for red_grp in red_grps:
for ap in red_grp:
i, j = ants_map[ap[0]], ants_map[ap[1]]
if ap in antpairs_data:
dinds = uvdata.antpair2ind(ap)
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
model = model_r[i, j] + 1j * model_i[i, j]
else:
dinds = uvdata.antpair2ind(ap[::-1])
dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
model = model_r[i, j] - 1j * model_i[i, j]
uvdata.data_array[dinds, 0, :, polnum] = model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
"""Insert tensorized gains back into uvcal object
Parameters
----------
uvdata: UVData object
uvdata object to insert model data into.
time: float
JD of time to insert.
polarization: str
polarization to insert.
gains_re: dict with int keys and tf.Tensor object values
dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
representing the real component of the complex gain for antenna i.
gains_im: dict with int keys and tf.Tensor object values
dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
representing the imag component of the complex gain for antenna j.
Returns
-------
N/A: Modifies uvcal inplace.
"""
polnum = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
gindt = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
for ant_index in range(uvcal.Nants_data):
uvcal.gain_array[ant_index, 0, :, gindt, polnum] = (
gains_re[ant_index].numpy() + 1j * gains_im[ant_index].numpy()
)
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initial foreground coefficient guesses via per-group linear least squares.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data.
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of foreground modeling tf.Tensor objects, each with shape
        (nvecs, ngrps, nbls, nfreqs); see tensorize_fg_model_comps_dict.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output.
        default is False.

    Returns
    -------
    fg_coeffs: list
        list of tf.Tensor objects with shape (nvecs, ngrps, 1, 1) holding
        least-squares coefficient guesses for each chunk; trailing singleton
        axes are for broadcasting against (nvecs, ngrps, nbls, nfreqs) comps.
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    nchunks = len(data)
    # 1.0 where a weight is nonzero and 0.0 elsewhere; masks flagged samples
    # out of the least-squares fit without changing relative weighting.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[chunk].numpy(), 0.0), dtype=wgts[chunk].dtype)
        for chunk in range(nchunks)
    ]
    fg_coeffs = []
    for chunk in PBARS[notebook_progressbar](range(nchunks)):
        chunk_coeffs = []
        ngrps = data[chunk].shape[0]
        ndata = data[chunk].shape[1] * data[chunk].shape[2]
        nvecs = fg_model_comps[chunk].shape[0]
        for grp in range(ngrps):
            comps_flat = tf.reshape(fg_model_comps[chunk][:, grp], (nvecs, ndata))
            # all-zero rows are padding at the tail of the component axis, so
            # the first all-zero row marks how many vectors are actually active.
            zero_rows = np.where(np.all(np.isclose(comps_flat.numpy(), 0.0), axis=1))[0]
            if len(zero_rows) > 0:
                nvecs_active = np.min(zero_rows)
            else:
                nvecs_active = nvecs
            # least squares over the active columns only.
            design = tf.transpose(comps_flat)[:, :nvecs_active]
            rhs = tf.reshape(data[chunk][grp] * binary_wgts[chunk][grp], (ndata, 1))
            solution = tf.reshape(tf.linalg.lstsq(design, rhs), (nvecs_active,))
            # pad back up to nvecs so every group stacks into one tensor.
            chunk_coeffs.append(tf.pad(solution, [(0, nvecs - nvecs_active)]))
        # add two trailing singleton axes to satisfy broadcasting downstream.
        fg_coeffs.append(tf.reshape(tf.transpose(tf.stack(chunk_coeffs)), (nvecs, ngrps, 1, 1)))
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelations to delay components with this width.
        default is 25.0.
    Returns
    -------
    data_weights: UVFlag object
        UFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                # least squares on unflagged channels only; the smooth model is
                # then evaluated on all channels to interpolate over flags.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # noise variance on baseline (i, j) scales with autocorr_i * autocorr_j,
        # so inverse-variance weights are the reciprocal of that product.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        passed through to tensorize_fg_model_comps_dict which splits up
        modeling groups smaller than this value.
        default is 5.
    n_profile_steps: int, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' sets sum of amps equal and sum of phases equal.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, multiply fitting weights by the squared magnitude of the
        current foreground model and renormalize so weights sum to unity.
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and are likely orders of
        magnitude off from an absolutely calibrated scale unless a sky_model
        with the correct flux scale was supplied.
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    # work on a baseline-selected copy so the caller's uvdata is left untouched.
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    # NOTE(review): if sky_model is None AND model_regularization is None this
    # falls into the else branch and calls .select on None — confirm callers
    # always pass a sky_model when model_regularization is disabled.
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            # boolean mask over the blt axis selecting this integration.
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data; used to normalize tensors so the
                # optimizer works with order-unity numbers.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                # only recompute starting guesses when not warm-starting from
                # the previous time step (or on the very first time).
                if first_time or not init_guesses_from_previous_time_step:
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # scale weights by model power |V_model|^2 and renormalize.
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this overwrites the whole polarization's history
                # for every skipped time; fit_history_p entries from fitted times
                # are restored below — confirm this bookkeeping is intended.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # un-calibrate the model (multiply gains back in) for the residual;
    # optionally leave the returned model in that gain-multiplied state.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag a single (time, polarization) slab of a UVData or UVCal in place.

    For UVData, flags the slab and zeroes the corresponding data; for UVCal,
    flags the slab and resets the gains there to unity.

    Parameters
    ----------
    data_object: UVData or UVCal object
        object to flag in place.
    time: float
        JD of the time to flag.
    polarization: str
        polarization to flag.

    Raises
    ------
    ValueError
        if data_object is neither UVData nor UVCal.
    """
    if isinstance(data_object, UVData):
        blt_mask = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_index = np.where(
            data_object.polarization_array
            == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        # zero out flagged data so downstream arithmetic ignores it.
        data_object.flag_array[blt_mask, :, :, pol_index] = True
        data_object.data_array[blt_mask, :, :, pol_index] = 0.0
    elif isinstance(data_object, UVCal):
        pol_index = np.where(
            data_object.jones_array
            == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_index = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # reset gains to unity and mark them flagged.
        data_object.gain_array[:, 0, :, time_index, pol_index] = 1.0
        data_object.flag_array[:, 0, :, time_index, pol_index] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with sepearate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        passed to modeling.get_uv_overlapping_grps_conjugated; presumably
        requires baseline angles to match within angle_match_tol for grouping.
        default is True.
    angle_match_tol: float, optional
        angle matching tolerance passed to modeling.get_uv_overlapping_grps_conjugated.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        path to save model_comps_dict to (via np.save).
        default is None -> don't save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # get fitting groups
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive mixed modeling components unless the caller supplied them.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> derive DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # BUGFIX: previously fg_model_comps_dict was accepted (and documented) but
    # ignored — DPSS components were always recomputed. Now only derive them
    # when the caller did not supply precomputed components.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate foreground model visibilities from coefficients and components.

    Parameters
    ----------
    fg_r: tf.Tensor
        real foreground coefficients, broadcastable against fg_comps.
    fg_i: tf.Tensor
        imag foreground coefficients, broadcastable against fg_comps.
    fg_comps: tf.Tensor
        foreground modeling components; axis 0 is the component axis.

    Returns
    -------
    (vr, vi): tuple of tf.Tensor
        real and imag model visibilities (coefficients contracted with
        components along axis 0).
    """
    real_part, imag_part = (tf.reduce_sum(coeffs * fg_comps, axis=0) for coeffs in (fg_r, fg_i))
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute gain-multiplied foreground model visibilities.

    Applies per-antenna complex gains (g0 * conj(g1)) to the foreground model,
    with all complex arithmetic expanded into real/imag parts.

    Parameters
    ----------
    g_r, g_i: tf.Tensor
        real and imag per-antenna gains, gatherable by antenna index.
    fg_r, fg_i: tf.Tensor
        real and imag foreground coefficients (see fg_model).
    fg_comps: tf.Tensor
        foreground modeling components; axis 0 is the component axis.
    ant0_inds, ant1_inds: index tensors
        antenna indices of the first / second antenna in each baseline.

    Returns
    -------
    (model_r, model_i): tuple of tf.Tensor
        real and imag gain-multiplied model visibilities.
    """
    # per-baseline gain factors for each antenna in the pair.
    g0_re = tf.gather(g_r, ant0_inds)
    g1_re = tf.gather(g_r, ant1_inds)
    g0_im = tf.gather(g_i, ant0_inds)
    g1_im = tf.gather(g_i, ant1_inds)
    # real/imag cross products entering (g0 * conj(g1)) * v.
    rr = g0_re * g1_re
    ii = g0_im * g1_im
    ri = g0_re * g1_im
    ir = g0_im * g1_re
    vis_re, vis_im = fg_model(fg_r, fg_i, fg_comps)
    model_re = (rr + ii) * vis_re + (ri - ir) * vis_im
    model_im = (ir - ri) * vis_re + (rr + ii) * vis_im
    return model_re, model_im
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared model-data residuals (real plus imag parts)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE between gain-multiplied model and data over all chunks."""
    chunk_losses = []
    # accumulate the per-chunk weighted squared error.
    for chunk in range(nchunks):
        mr, mi = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        chunk_losses.append(
            tf.constant(0.0, dtype) + mse(mr, mi, data_r[chunk], data_i[chunk], wgts[chunk])
        )
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE plus a sum-regularization penalty.

    Adds to the data MSE the squared deviation of the weighted model sums
    (real and imag) from prior_r_sum / prior_i_sum.
    """
    chunk_losses = []
    model_re_sums = []
    model_im_sums = []
    for chunk in range(nchunks):
        mr, mi = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        # weighted sums of real and imag model parts feed the regularizer.
        model_re_sums.append(tf.constant(0.0, dtype) + tf.reduce_sum(mr * wgts[chunk]))
        model_im_sums.append(tf.constant(0.0, dtype) + tf.reduce_sum(mi * wgts[chunk]))
        chunk_losses.append(
            tf.constant(0.0, dtype) + mse(mr, mi, data_r[chunk], data_i[chunk], wgts[chunk])
        )
    return (
        tf.reduce_sum(tf.stack(chunk_losses))
        + tf.square(tf.reduce_sum(tf.stack(model_re_sums)) - prior_r_sum)
        + tf.square(tf.reduce_sum(tf.stack(model_im_sums)) - prior_i_sum)
    )
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickel fitting info to.
        currently unused (see note at end of function).
    x_orientation: str, optional
        x_orientation to stamp on output gains.
        default is 'east'.
    clobber: bool, optional
        overwrite existing output files.
        default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of ints, optional
        antennas to exclude from the data selection.
        default is None.
    select_ants: list of ints, optional
        antennas to select exclusively in the data selection.
        default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        float precision (32 or 64 bits) for tensor math.
        default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwrags for calibration_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                # LogicalDeviceConfiguration takes MiB; convert from GiB.
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # BUGFIX: previously this re-selected the data (uvd) instead of the
        # model, leaving uvd_model unselected and inconsistent with the data.
        utils.select_baselines(
            uvd_model,
            bllen_min=bllen_min,
            bllen_max=bllen_max,
            bl_ew_min=bl_ew_min,
            ex_ants=ex_ants,
            select_ants=select_ants,
        )
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # record the kwargs used in the returned fit info for reproducibility.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argument parser for input/output file and data-selection options.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with an "Input and Output Arguments." group covering input data /
        model / gain files, output file names, baseline and antenna selection,
        GPU selection, and float precision.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overall amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a starting point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # bugfix: default was the string "False", which is truthy, so clobber was
    # effectively always enabled; use the boolean False instead.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Build an argument parser with I/O plus general gradient-descent fitting options.

    Returns
    -------
    ap: argparse.ArgumentParser
        the parser from input_output_parser() extended with a
        "General Fitting Arguments." group (optimizer choice, convergence
        tolerance, redundancy handling, weighting options, etc.).
    """
    ap = input_output_parser()
    sp = ap.add_argument_group("General Fitting Arguments.")
    sp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    sp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    sp.add_argument("--verbose", default=False, action="store_true", help="lots of text outputs.")
    sp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for minimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    sp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # NOTE(review): action="store_true" with default=True means this flag can
    # never be disabled from the CLI; left unchanged to preserve behavior.
    sp.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    sp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    sp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not recommended for GPUs.",
    )
    sp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    sp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    sp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more than this fraction of data is flagged.",
    )
    sp.add_argument("--model_regularization", type=str, default="post_hoc")
    sp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    sp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    sp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return ap
def dpss_fit_argparser():
    """Build the full argument parser including DPSS-specific fitting options.

    Returns
    -------
    parser: argparse.ArgumentParser
        parser from fitting_argparser() extended with a
        "DPSS Specific Fitting Arguments." group.
    """
    parser = fitting_argparser()
    dpss_group = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    dpss_group.add_argument(
        "--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes."
    )
    dpss_group.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    dpss_group.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return parser
|
calibrate_and_model_dpss
|
Simultaneously solve for gains and model foregrounds with DPSS vectors.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float, optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
fg_model_comps_dict: dict, optional
dictionary containing precomputed foreground model components.
Currently only supported if use_redundancy is False.
fitting_kwargs: kwarg dict
additional kwargs for calibrate_and_model_pbl.
see docstring of calibrate_and_model_pbl.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
|
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Mapping from optimizer name (the string accepted by the `optimizer` argument
# of fit_gains_and_foregrounds and the --optimizer CLI flag) to the
# corresponding tf.optimizers first-order optimizer class.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """Group fitting groups into chunks keyed by (number of baselines, max number of vectors).

    All fitting groups that contain the same total number of baselines are
    collected into a single chunk so they can later be stacked into one dense
    modeling tensor.

    Parameters
    ----------
    fg_model_comps_dict: dict
        keys are tuples of tuples of 2-tuples: each key is a 'modeling group'
        made of 'redundant groups', each of which is a collection of antenna
        pairs; values are (nred_grps * nfreqs, nvecs) numpy arrays of modeling
        components for that group.
    use_redundancy: bool, optional
        If False, modeling groups whose redundant sub-groups all have the same
        (small) number of elements are split into per-element groups with no
        redundancy, so they land in the same chunk. default is False.
    grp_size_threshold: int, optional
        only split groups with fewer than this many redundant sub-groups.
        default is 5.

    Returns
    -------
    dict
        keys are 2-tuples (nbl, nvecs); each value is a dict mapping the
        fitting groups with nbl total baselines to their component arrays,
        where nvecs is the largest second dimension among those arrays.
    """
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # Break up small fitting groups whose redundant sub-groups all have the
        # same length into per-element groups with no redundancy, so groups of
        # varying redundant-group length do not end up in separate chunks.
        for fit_grp in list(fg_model_comps_dict.keys()):
            grp_lens = np.asarray([len(red_grp) for red_grp in fit_grp])
            if np.allclose(grp_lens, np.mean(grp_lens)) and len(grp_lens) < grp_size_threshold:
                vectors = fg_model_comps_dict.pop(fit_grp)
                for element in range(int(grp_lens[0])):
                    split_grp = tuple((red_grp[element],) for red_grp in fit_grp)
                    fg_model_comps_dict[split_grp] = vectors
    # Collect fitting groups by their total baseline count and track the
    # largest vector count seen for each baseline count.
    grouped_keys = {}
    max_nvecs = {}
    for fit_grp, comps in fg_model_comps_dict.items():
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        if nbl in grouped_keys:
            grouped_keys[nbl].append(fit_grp)
            max_nvecs[nbl] = max(max_nvecs[nbl], comps.shape[1])
        else:
            grouped_keys[nbl] = [fit_grp]
            max_nvecs[nbl] = comps.shape[1]
    return {
        (nbl, max_nvecs[nbl]): {grp: fg_model_comps_dict[grp] for grp in grouped_keys[nbl]}
        for nbl in grouped_keys
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with tuple-of-tuple-of-2-tuple keys (modeling groups of
        redundant groups of antenna pairs). Each key points to an
        (nred_grps * nfreqs x nvecs) numpy.ndarray describing the modeling
        components for that fitting group.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int
        number of frequency channels
    use_redundancy: bool, optional
        passed through to chunk_fg_comp_dict_by_nbls. default is False.
    dtype: numpy.dtype
        tensor data type. default is np.float32
    notebook_progressbar: bool, optional
        unused here; retained for interface compatibility.
    verbose: bool, optional
        lots of text output. default is False.
    grp_size_threshold: int, optional
        passed through to chunk_fg_comp_dict_by_nbls. default is 5.

    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls
        are lumped into the same modeling tensor along the ngrps axis. nvecs is the maximum
        number of vectors representing any of the ngrps baseline grps, so many rows along
        the vector dimension may be zero. This is wasteful of memory but allows fast dense
        matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components by (nbls, nvecs) so each chunk can be stacked
    # into one dense tensor.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
            corr_inds_grp = []
            nbl = 0
            # hoist loop invariants: the component array and its vector count
            # are fixed for the whole modeling group.
            comps = fg_model_comps_dict[(nbls, nvecs)][modeling_grp]
            vecslice = slice(0, comps.shape[1])
            for rgrpnum, red_grp in enumerate(modeling_grp):
                # rows of `comps` for this redundant group's frequencies.
                compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    # every baseline in a redundant group shares the same
                    # component rows; rows beyond vecslice stay zero-padded.
                    modeling_matrix[vecslice, grpnum, nbl] = comps[compslice].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    # invert ants_map so tensor indices can be mapped back to antenna numbers.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # dense (ant_i, ant_j, freq) scratch arrays; entries not listed in
    # corr_inds remain zero and are dropped by the gather_nd calls below.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # NOTE(review): relies on pyuvdata's private _key2inds, which
                # returns (inds as stored, inds requiring conjugation, pol inds
                # for each ordering) — verify against the pinned pyuvdata version.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    # baseline stored in the requested orientation.
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    # baseline stored conjugated; flag for conjugation below.
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the single blt index matching the requested time.
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                # iflags: True where data is unflagged (inverse of flag_array).
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # default weights: unflagged mask, optionally scaled by nsamples.
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # look up the matching row of the UVFlag weights object,
                    # trying both antenna-pair orientations.
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights so they sum to unity over all selected baselines.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather the per-chunk (ngrps, nbls, nfreqs) slices out of the dense arrays.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.

    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        Time (JD) selecting which integration to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determining normalization.

    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # select blt rows at the requested time.
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # only use samples unflagged in both the deconvolved data and the reference model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # mask divide-by-zero / inf results so nanmean ignores them.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    # scale_factor_phase is currently unused (see comment below); amplitude uses RMS of |ratio|.
    scale_factor_phase = np.angle(np.nanmean(data_ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    scale_factor = scale_factor_abs  # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # gains enter visibilities as g_i * conj(g_j), so the data scale factor is
    # compensated by scaling each gain by scale_factor**-0.5.
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract the gains for one time and polarization as real/imag tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    gains_im: tf.Tensor object.
        imag component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    """
    jones_ind = np.where(
        uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    )[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    gain_slice = uvcal.gain_array[:, 0, :, time_ind, jones_ind].squeeze()
    gains_re = tf.convert_to_tensor(gain_slice.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(gain_slice.imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model as a dense antenna-pair array.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects representing foreground
        modeling vectors. Each tensor is (nvecs, ngrps, nbls, nfreqs).
    fg_coeffs: list
        list of fg modeling tf.Tensor objects representing foreground
        modeling coefficients. Each tensor is (nvecs, ngrps, 1, 1).
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs model of the visibility data.
    """
    model = np.zeros((nants, nants, nfreqs))
    # each chunk contributes a (ngrps, nbls, nfreqs) block: sum coefficients
    # against components along the vector axis, then scatter into (i, j) slots.
    for comps, coeffs, chunk_inds in zip(fg_model_comps, fg_coeffs, corr_inds):
        chunk_model = tf.reduce_sum(coeffs * comps, axis=0).numpy()
        for gnum in range(comps.shape[1]):
            for blnum, (i, j) in enumerate(chunk_inds[gnum]):
                model[i, j] = chunk_model[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.

    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
            chunk
                group
                    baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        float precision of the fit. default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        extra kwargs forwarded to tf.function when graph_mode is True.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.

    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    nants = g_r.shape[0]
    nfreqs = g_r.shape[1]
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        # renamed from `vars` to avoid shadowing the builtin.
        opt_vars = [g_r, g_i] + fg_r + fg_i
    else:
        opt_vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # precompute weighted sums of the sky model for the "sum" regularizer.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # single gradient-descent step: compute loss, backprop, apply update.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, opt_vars)
        opt.apply_gradients(zip(grads, opt_vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    loss = train_step()
    # bugfix: initialize the optimal parameters before the descent loop so they
    # are always bound at return time. Previously, use_min=True combined with
    # freeze_model=True (or maxsteps=0) left fg_r_opt/g_r_opt undefined and
    # raised NameError at the return statement.
    g_r_opt = g_r.value()
    g_i_opt = g_i.value()
    if not freeze_model:
        fg_r_opt = [fgr.value() for fgr in fg_r]
        fg_i_opt = [fgi.value() for fgi in fg_i]
    else:
        fg_r_opt = fg_r
        fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            echo(
                f"Tolerance threshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Write fitted model values back into a UVData object.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0

    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    antpairs_data = uvdata.get_antpairs()
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            i, j = ants_map[ap[0]], ants_map[ap[1]]
            # if the pair is stored conjugated in uvdata, look it up reversed
            # and conjugate the model value when writing it back.
            stored_as_is = ap in antpairs_data
            blt_inds = uvdata.antpair2ind(ap if stored_as_is else ap[::-1])
            blt_ind = blt_inds[np.where(np.isclose(time, uvdata.time_array[blt_inds], atol=1e-7, rtol=0.0))[0][0]]
            if stored_as_is:
                vis_model = model_r[i, j] + 1j * model_i[i, j]
            else:
                vis_model = model_r[i, j] - 1j * model_i[i, j]
            uvdata.data_array[blt_ind, 0, :, polnum] = vis_model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Insert tensorized gains back into uvcal object

    Parameters
    ----------
    uvcal: UVCal object
        uvcal object to insert gain solutions into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    gains_re: dict with int keys and tf.Tensor object values
        dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
        representing the real component of the complex gain for antenna i.
    gains_im: dict with int keys and tf.Tensor object values
        dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
        representing the imag component of the complex gain for antenna j.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    # locate the jones/time slots matching the requested polarization and JD.
    jones_ind = np.where(
        uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation)
    )[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for ant_ind in range(uvcal.Nants_data):
        complex_gain = gains_re[ant_ind].numpy() + 1j * gains_im[ant_ind].numpy()
        uvcal.gain_array[ant_ind, 0, :, time_ind, jones_ind] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict
        docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs: list
        list of tf.Tensor objects (one per chunk in ``data``), each with shape
        (nvecs, ngrps, 1, 1), holding least-squares initial guesses for the
        foreground coefficients of each modeling vector in each fitting group.
        (The real and imaginary parts are obtained by calling this function
        separately on the real and imaginary data tensors.)
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # binary mask tensors: 1.0 where the weight is nonzero, 0.0 elsewhere.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        # set up linear leastsq
        fg_coeff_chunk = []
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        # pad with zeros
        for gnum in range(ngrps):
            # NOTE: despite the name, ``nonzero_rows`` holds the indices of
            # modeling rows that are entirely (close to) zero — i.e. the
            # zero-padding rows of this group's component tensor.
            nonzero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            if len(nonzero_rows) > 0:
                # assumes all-zero padding rows come last — TODO confirm against
                # tensorize_fg_model_comps_dict's padding convention.
                nvecs_nonzero = np.min(nonzero_rows)
            else:
                nvecs_nonzero = nvecs
            # solve linear leastsq
            fg_coeff_chunk.append(
                tf.reshape(
                    tf.linalg.lstsq(
                        tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
                        tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
                    ),
                    (nvecs_nonzero,),
                )
            )
            # pad zeros at the end back up to nvecs.
            fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satify broadcasting rules.
        fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
        fg_coeffs.append(fg_coeff_chunk)
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelations to delay components with this width
        (passed as ``offset`` to modeling.yield_dpss_model_comps_bl_grp;
        presumably in ns — confirm against that function's docstring).
        default is 25.0

    Returns
    -------
    data_weights: UVFlag object
        UFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # evaluate the smooth fit over all channels (including flagged ones).
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # inverse-variance proxy: 1 / (auto_i * auto_j), zeroed where flagged.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        passed through to tensorize_fg_model_comps_dict; modeling groups with
        fewer elements than this are handled separately there.
        default is 5.
    n_profile_steps: int, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' renormalizes amplitudes/phases against sky_model after each fit.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, rescale the fitting weights by the squared amplitude of the
        initial foreground model (an SNR-like weighting).
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and may therefore be offset by an
        overall scale/phase degeneracy unless model_regularization is used.
    fit_history:
        dictionary containing fit history with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    # NOTE(review): if sky_model is None AND model_regularization is None, the
    # else branch below calls .select on None — confirm callers always provide
    # a sky_model when model_regularization is disabled.
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data; used to normalize tensors (and undone on insertion).
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                if first_time or not init_guesses_from_previous_time_step:
                    # (re)initialize gains and foreground coefficients from scratch;
                    # otherwise carry over the previous time step's solution.
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # rescale weights by |initial model|^2 (SNR-like weighting).
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # NOTE(review): this marker is overwritten by the
                # ``fit_history[polnum] = fit_history_p`` assignment after the
                # time loop, so "skipped!" never survives — confirm intent.
                fit_history[polnum] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    # fold the fitted gains back into model/resid as requested.
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag a single time / polarization slice of a UVData or UVCal object in place.

    For UVData, the matching baseline-times are flagged and their data zeroed.
    For UVCal, the matching gains are set to unity and flagged.

    Parameters
    ----------
    data_object: UVData or UVCal object
        object to flag.
    time: float
        JD of the time to flag.
    polarization: str
        polarization string of the slice to flag.

    Returns
    -------
    N/A: Modifies data_object inplace.
    """
    if isinstance(data_object, UVData):
        blt_mask = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_ind = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        data_object.flag_array[blt_mask, :, :, pol_ind] = True
        data_object.data_array[blt_mask, :, :, pol_ind] = 0.0
    elif isinstance(data_object, UVCal):
        pol_ind = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_ind = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        data_object.gain_array[:, 0, :, time_ind, pol_ind] = 1.0
        data_object.flag_array[:, 0, :, time_ind, pol_ind] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with sepearate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        if True, baselines are only grouped for joint frequency-redundant
        modeling when their angles match within angle_match_tol.
        default is True.
    angle_match_tol: float, optional
        tolerance for the angle comparison above.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        path to save the computed model_comps_dict to (via np.save).
        default is None -> do not save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # get fitting groups
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    if model_comps_dict is None:
        # derive modeling components from the fitting groups.
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
# MASKED: calibrate_and_model_dpss function (lines 1502-1583)
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model by summing coefficients against components.

    Parameters
    ----------
    fg_r, fg_i: tf.Tensor objects
        real / imaginary foreground coefficients, broadcastable against fg_comps.
    fg_comps: tf.Tensor object
        foreground modeling components; summed over axis 0 (the vector axis).

    Returns
    -------
    vr, vi: tf.Tensor objects
        real and imaginary parts of the model visibilities.
    """
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute uncalibrated model visibilities: g_0 * conj(g_1) * V_fg.

    Parameters
    ----------
    g_r, g_i: tf.Tensor objects
        real / imaginary gain tensors, indexed by antenna.
    fg_r, fg_i: tf.Tensor objects
        real / imaginary foreground coefficients (see fg_model).
    fg_comps: tf.Tensor object
        foreground modeling components.
    ant0_inds, ant1_inds: index tensors
        antenna indices of the first / second antenna in each baseline.

    Returns
    -------
    model_r, model_i: tf.Tensor objects
        real and imaginary parts of g_0 * conj(g_1) * V for each baseline.
    """
    g_r_0 = tf.gather(g_r, ant0_inds)
    g_r_1 = tf.gather(g_r, ant1_inds)
    g_i_0 = tf.gather(g_i, ant0_inds)
    g_i_1 = tf.gather(g_i, ant1_inds)
    vr, vi = fg_model(fg_r, fg_i, fg_comps)
    # expand (g_r_0 + i g_i_0)(g_r_1 - i g_i_1)(vr + i vi) into real/imag parts.
    gain_rr = g_r_0 * g_r_1 + g_i_0 * g_i_1
    gain_ri = g_r_0 * g_i_1 - g_i_0 * g_r_1
    model_r = gain_rr * vr + gain_ri * vi
    model_i = -gain_ri * vr + gain_rr * vi
    return model_r, model_i
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real + imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum(wgts * (tf.square(resid_r) + tf.square(resid_i)))
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE loss accumulated over all data chunks.

    Evaluates the data model for each chunk and sums the per-chunk
    weighted squared residuals into a single scalar loss tensor.
    """
    per_chunk_loss = []
    for cnum in range(nchunks):
        model_r, model_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # add onto a dtype-pinned zero so every term carries the requested dtype.
        per_chunk_loss.append(
            tf.constant(0.0, dtype) + mse(model_r, model_i, data_r[cnum], data_i[cnum], wgts[cnum])
        )
    return tf.reduce_sum(tf.stack(per_chunk_loss))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE loss plus a sum-regularization penalty.

    In addition to the per-chunk weighted squared residuals, penalizes the
    squared difference between the weighted sums of the model's real/imag
    parts and the supplied prior sums (ties the overall model amplitude and
    phase to the prior).
    """
    per_chunk_loss = []
    weighted_r_sums = []
    weighted_i_sums = []
    for cnum in range(nchunks):
        model_r, model_i = data_model(
            g_r, g_i, fg_r[cnum], fg_i[cnum], fg_comps[cnum], ant0_inds[cnum], ant1_inds[cnum]
        )
        # weighted sums of real and imag model parts feed the regularization term;
        # add onto dtype-pinned zeros so every term carries the requested dtype.
        weighted_r_sums.append(tf.constant(0.0, dtype) + tf.reduce_sum(model_r * wgts[cnum]))
        weighted_i_sums.append(tf.constant(0.0, dtype) + tf.reduce_sum(model_i * wgts[cnum]))
        per_chunk_loss.append(
            tf.constant(0.0, dtype) + mse(model_r, model_i, data_r[cnum], data_i[cnum], wgts[cnum])
        )
    data_loss = tf.reduce_sum(tf.stack(per_chunk_loss))
    reg_r = tf.square(tf.reduce_sum(tf.stack(weighted_r_sums)) - prior_r_sum)
    reg_i = tf.square(tf.reduce_sum(tf.stack(weighted_i_sums)) - prior_i_sum)
    return data_loss + reg_r + reg_i
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        path to pickle fitting info to.
        NOTE: currently unused — fitting info is returned but not written
        (see the "don't write fitting_info_outfilename for now" comments below).
    x_orientation: str, optional
        x_orientation of feeds to set on the output gains.
        default is "east".
    clobber: bool, optional
        overwrite existing output files.
        default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of int, optional
        antennas to exclude from calibration and modeling.
        default is None.
    select_ants: list of int, optional
        antennas to select exclusively for calibration and modeling.
        default is None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        float precision in bits (32 or 64) used for gradient descent.
        default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwrags for calibration_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # accept a path, a list of paths, or an already-loaded UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # NOTE(review): this re-selects on the data (uvd), not the model —
        # presumably intended to act on uvd_model; confirm.
        utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argument parser with the shared input/output options.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with an "Input and Output Arguments." group attached; intended
        to be extended by fitting_argparser and friends.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overal amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a staring point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUGFIX: default was the string "False", which is truthy, so args.clobber
    # evaluated True even without the flag. Use the boolean False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calbration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
ap = input_output_parser()
sp = ap.add_argument_group("General Fitting Arguments.")
sp.add_argument(
"--tol",
type=float,
default=1e-14,
help="Stop gradient descent after cost function converges to within this value.",
)
sp.add_argument(
"--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
)
sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
sp.add_argument("--verbose", default=False, action="store_true", help="lots of text ouputs.")
sp.add_argument(
"--use_min",
default=False,
action="store_true",
help="Use params for mimimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
)
sp.add_argument(
"--use_redundancy",
default=False,
action="store_true",
help="Model redundant visibilities with the same set of foreground parameters.",
)
sp.add_argument(
"--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
)
sp.add_argument(
"--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
)
sp.add_argument(
"--graph_mode",
default=False,
action="store_true",
help="Pre-compile computational graph before running gradient descent. Not reccomended for GPUs.",
)
sp.add_argument(
"--init_guesses_from_previous_time_step",
default=False,
action="store_true",
help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
)
sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
sp.add_argument(
"--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
)
sp.add_argument(
"--skip_threshold",
type=float,
default=0.5,
help="Skip and flag time/polarization if more then this fractionf of data is flagged.",
)
sp.add_argument("--model_regularization", type=str, default="post_hoc")
sp.add_argument(
"--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
)
sp.add_argument(
"--use_model_snr_weights",
default=False,
action="store_true",
help="If True, weight contributions to MSE as proportional to SNR.",
)
sp.add_argument(
"--use_autocorrs_in_weights",
default=False,
action="store_true",
help="If True, use autocorrelations to derive relative SNR weights.",
)
return ap
def dpss_fit_argparser():
ap = fitting_argparser()
sp = ap.add_argument_group("DPSS Specific Fitting Arguments.")
sp.add_argument("--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes.")
sp.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
sp.add_argument(
"--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
)
return ap
|
def calibrate_and_model_dpss(
uvdata,
horizon=1.0,
min_dly=0.0,
offset=0.0,
include_autos=False,
verbose=False,
red_tol=1.0,
notebook_progressbar=False,
fg_model_comps_dict=None,
**fitting_kwargs,
):
"""Simultaneously solve for gains and model foregrounds with DPSS vectors.
Parameters
----------
uvdata: UVData object.
dataset to calibrate and filter.
horizon: float, optional
fraction of baseline delay length to model with dpss modes
unitless.
default is 1.
min_dly: float, optional
minimum delay to model with dpss models.
in units of ns.
default is 0.
offset: float optional
offset off of horizon wedge to include in dpss delay range.
in units of ns.
default is 0.
include_autos: bool, optional
if true, include autocorrelations in fitting.
default is False.
verbose: bool, optional
lots of text output
default is False.
red_tol: float, optional
tolerance for treating baselines as redundant (meters)
default is 1.0
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
fg_model_comps_dict: dict, optional
dictionary containing precomputed foreground model components.
Currently only supported if use_redundancy is False.
fitting_kwargs: kwarg dict
additional kwargs for calibrate_and_model_pbl.
see docstring of calibrate_and_model_pbl.
Returns
-------
model: UVData object
uvdata object containing DPSS model of intrinsic foregrounds.
resid: UVData object
uvdata object containing residuals after subtracting model times gains and applying gains.
gains: UVCal object
uvcal object containing fitted gains.
fit_history:
dictionary containing fit history for each time-step and polarization in the data with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
dpss_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
uvdata,
horizon=horizon,
min_dly=min_dly,
offset=offset,
include_autos=include_autos,
red_tol=red_tol,
notebook_progressbar=notebook_progressbar,
verbose=verbose,
)
(model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
uvdata=uvdata,
fg_model_comps_dict=dpss_model_comps_dict,
include_autos=include_autos,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
**fitting_kwargs,
)
return model, resid, gains, fitted_info
| 1,502 | 1,583 |
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Registry mapping optimizer names (as accepted by the --optimizer CLI flag and
# the `optimizer` argument of fit_gains_and_foregrounds) to the corresponding
# first-order tf.optimizers classes.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """Group fitting groups into chunks keyed by (nbl, max nvecs).

    Reorganizes ``fg_model_comps_dict`` so that all fitting groups containing
    the same total number of baselines end up in the same chunk.

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples (three levels).
        At the first level each tuple is a 'modeling group'; visibilities in each
        modeling group are represented by a set of basis vectors spanning all
        baselines in that group, raveled by baseline and then frequency. Each
        tuple within the modeling group is a 'redundant group' of visibilities
        modeled with identical component coefficients; each of its elements is a
        2-tuple antenna pair. This formalism accommodates both redundant and
        non-redundant modeling (redundant groups of length 1).
    use_redundancy: bool, optional
        If False, split fitting groups whose redundant sub-groups all have the
        same (small) number of baselines into separate non-redundant fitting
        groups. This avoids lumping single redundant groups of varying lengths
        into different chunks, since increasing the number of chunks impacts
        run-time more than increasing the number of baselines per chunk.
        default is False.
    grp_size_threshold: int, optional
        only fitting groups with fewer redundant sub-groups than this threshold
        are split when use_redundancy is False.

    Returns
    -------
    fg_model_comps_dict_chunked: dict
        dictionary keyed by 2-tuples (nbl, nvecs): the number of baselines per
        group in the chunk and the maximum number of modeling vectors. Each
        value is a dict mapping fitting groups with nbl baselines to their
        (nred_grps * nfreqs x nvecs) numpy.ndarray of modeling components.
    """
    comps = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # Split small fitting groups whose redundant sub-groups all have the
        # same length into per-baseline fitting groups (removes redundancy).
        for fit_grp in list(comps.keys()):
            grp_lens = np.asarray([len(red_grp) for red_grp in fit_grp])
            if np.allclose(grp_lens, np.mean(grp_lens)) and len(grp_lens) < grp_size_threshold:
                modeling_vectors = comps.pop(fit_grp)
                # Each "column" of baselines across the redundant groups
                # becomes its own fitting group sharing the same vectors.
                for idx in range(int(grp_lens[0])):
                    new_key = tuple((red_grp[idx],) for red_grp in fit_grp)
                    comps[new_key] = modeling_vectors
    # Bucket fitting groups by their total baseline count and track the
    # largest vector count seen in each bucket.
    keys_by_nbl = {}
    maxvecs = {}
    for fit_grp, vectors in comps.items():
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        keys_by_nbl.setdefault(nbl, []).append(fit_grp)
        maxvecs[nbl] = max(maxvecs.get(nbl, 0), vectors.shape[1])
    return {
        (nbl, maxvecs[nbl]): {key: comps[key] for key in keys_by_nbl[nbl]}
        for nbl in keys_by_nbl
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor
    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary where each key is a 2-tuple (nbl, nvecs) referring to the number
        of baselines in each vector and the number of vectors. Each 2-tuple points to
        a dictionary where each key is the fitting group in fg_comps_dict that includes
        nbl baselines. Each key in the referenced dict points to an (nred_grps * nfreqs x nvecs)
        numpy.ndarray describing the modeling components for each fitting group in the chunk.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        forwarded to chunk_fg_comp_dict_by_nbls; see its docstring.
        default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        currently unused in this function body.
    verbose: bool, optional
        lots of text output. default is False.
    grp_size_threshold: int, optional
        forwarded to chunk_fg_comp_dict_by_nbls; see its docstring.
    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls are lumped into the same
        modeling tensor along the ngrps axis. nvecs is chosen in chunk_fg_comp_dict_by_nbls
        to be the maximum number of vectors representing any of the ngrps baseline grps
        which means that many rows in nvecs will be zero. For example, if we are modeling with
        vectors that all span nbls=1 baseline and using delay-modes to model our data
        then nvecs will equal the largest number of delay modes necessary to model the wedge
        on all baselines even though the short baselines are described by far fewer modes
        on short baselines, most of the rows along the vector dimension will therefor be zero.
        This is wasteful of memory but it allows us to take advantage of the fast
        dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        # One dense (nvecs, ngrps, nbls, nfreqs) tensor per chunk; groups with
        # fewer than nvecs vectors are zero-padded along the vector axis.
        ngrps = len(fg_model_comps_dict[(nbls, nvecs)])
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(fg_model_comps_dict[(nbls, nvecs)]):
            corr_inds_grp = []
            # nbl counts baselines across ALL redundant groups in this fitting
            # group; it indexes the baseline axis of modeling_matrix.
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                nred = len(red_grp)  # NOTE: currently unused
                for ap in red_grp:
                    # Map antenna numbers to dense 0..Nants_data-1 indices.
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    # Only the first shape[1] vector rows are populated; the
                    # remainder (up to nvecs) stay zero-padded.
                    vecslice = slice(0, fg_model_comps_dict[(nbls, nvecs)][modeling_grp].shape[1])
                    # Rows of the source array for this redundant group
                    # (components are raveled by redundant group, then freq).
                    compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                    dslice = slice(nbl * nfreqs, (nbl + 1) * nfreqs)  # NOTE: currently unused
                    # Transpose (nfreqs, nvecs) -> (nvecs, nfreqs) for this
                    # baseline slot. Every baseline in a redundant group gets
                    # the same component block.
                    modeling_matrix[vecslice, grpnum, nbl] = fg_model_comps_dict[(nbls, nvecs)][modeling_grp][
                        compslice
                    ].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor
    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32
    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
        Weights are normalized to sum to unity over all chunks.
    """
    # Invert the antenna-number -> dense-index map so we can go from tensor
    # indices back to antenna pairs.
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    # Dense scratch arrays indexed by (ant_i, ant_j, freq); only entries named
    # in corr_inds are filled, the rest stay zero.
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # _key2inds returns indices for the baseline as stored and for
                # its conjugate ordering; whichever is non-empty applies.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # Select the single blt index matching the requested JD.
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                # NOTE(review): if this indexing yields a view of
                # uvdata.data_array, the in-place division below would mutate
                # the caller's UVData object — confirm intended.
                data /= data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    # Derive weights from flags (and optionally nsamples).
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    # Pull weights from the provided UVFlag object, trying both
                    # antenna-pair orderings.
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # Normalize weights so they sum to unity across all baselines/freqs.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # Gather the dense (nant, nant, nfreq) arrays into per-chunk
    # (ngrps, nbls, nfreqs) tensors matching corr_inds structure.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.
    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        JD of the time to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determinging normalization.
    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # compute and multiply out scale-factor accounting for overall amplitude and phase degeneracy.
    polnum_data = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # Boolean mask selecting the blt rows at the requested time.
    bltsel = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # Only use samples unflagged in BOTH the deconvolved data and the model.
    selection = (
        ~uvdata_deconv.flag_array[bltsel, :, :, polnum_data]
        & ~uvdata_reference_model.flag_array[bltsel, :, :, polnum_data]
    )
    if additional_flags is not None:
        selection = selection & ~additional_flags[bltsel, :, :, polnum_data]
    # Ratio of reference model to deconvolved solution over unflagged samples.
    data_ratio = (
        uvdata_reference_model.data_array[bltsel, :, :, polnum_data][selection]
        / uvdata_deconv.data_array[bltsel, :, :, polnum_data][selection]
    )
    # Mask out infs/NaNs from zero-divisions so nanmean ignores them.
    data_ratio[~np.isfinite(data_ratio)] = np.nan
    # Phase correction is computed but deliberately NOT applied (see below).
    scale_factor_phase = np.angle(np.nanmean(data_ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(data_ratio) ** 2.0))
    scale_factor = scale_factor_abs # * np.exp(1j * scale_factor_phase) Need to figure this out later.
    uvdata_deconv.data_array[bltsel, :, :, polnum_data] *= scale_factor
    polnum_gains = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    gindt = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # Gains absorb the inverse square root so gain-pair products cancel the
    # amplitude applied to the data above.
    gains.gain_array[:, :, :, gindt, polnum_gains] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract the gains for one time and polarization as a pair of tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        real component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    gains_im: tf.Tensor object.
        imag component of gains for the selected time and polarization,
        shape Nant x Nfreq.
    """
    # Locate the jones/polarization axis index and the time axis index.
    jones_ind = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # Slice out the (Nant, Nfreq) complex gain plane, then split into parts.
    gain_plane = uvcal.gain_array[:, 0, :, time_ind, jones_ind].squeeze()
    gains_re = tf.convert_to_tensor(gain_plane.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(gain_plane.imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Evaluate the foreground model as a dense antenna-pair array.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs model of the visibility data
    """
    model = np.zeros((nants, nants, nfreqs))
    # Walk chunks in lockstep; within each chunk, contract the vector axis to
    # get per-(group, baseline) spectra, then scatter into (i, j) slots.
    for comps, coeffs, chunk_inds in zip(fg_model_comps, fg_coeffs, corr_inds):
        chunk_model = tf.reduce_sum(coeffs * comps, axis=0).numpy()
        for grp_num, grp_inds in enumerate(chunk_inds):
            for bl_num, (i, j) in enumerate(grp_inds):
                model[i, j] = chunk_model[grp_num, bl_num]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.
    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
        group
        baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less than this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        additional keyword args passed to tf.function when graph_mode is True.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.
    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    nants = g_r.shape[0]
    nfreqs = g_r.shape[1]
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        # renamed from `vars` to avoid shadowing the builtin.
        opt_vars = [g_r, g_i] + fg_r + fg_i
    else:
        opt_vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # Precompute weighted sums of the sky model for the "sum" regularizer.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )
    else:
        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )
    def train_step_code():
        # One gradient-descent update of all optimized variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, opt_vars)
        opt.apply_gradients(zip(grads, opt_vars))
        return loss
    if graph_mode:
        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()
    else:
        def train_step():
            return train_step_code()
    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    # BUGFIX: seed the "optimal" parameters with the current state before the
    # descent loop. Previously, with use_min=True and freeze_model=True the
    # fg_r_opt / fg_i_opt names were never assigned (the in-loop branch only
    # assigns them when not freeze_model, and the post-loop assignment is
    # skipped when use_min is True), causing a NameError at return. This also
    # guards against maxsteps == 0 or a loop that never improves the loss.
    g_r_opt = g_r.value()
    g_i_opt = g_i.value()
    if not freeze_model:
        fg_r_opt = [fgr.value() for fgr in fg_r]
        fg_i_opt = [fgi.value() for fgi in fg_i]
    else:
        fg_r_opt = fg_r
        fg_i_opt = fg_i
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            echo(
                f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min and fit_history["loss"]:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Insert fitted tensor values back into uvdata object for tensor mode.
    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    antpairs_data = uvdata.get_antpairs()
    polnum = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            # Dense tensor indices for this antenna pair.
            i, j = ants_map[ap[0]], ants_map[ap[1]]
            if ap in antpairs_data:
                # Pair stored in the same ordering as modeled: insert directly.
                dinds = uvdata.antpair2ind(ap)
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] + 1j * model_i[i, j]
            else:
                # Pair stored conjugated: insert the complex conjugate.
                dinds = uvdata.antpair2ind(ap[::-1])
                dinds = dinds[np.where(np.isclose(time, uvdata.time_array[dinds], atol=1e-7, rtol=0.0))[0][0]]
                model = model_r[i, j] - 1j * model_i[i, j]
            # Undo the normalization applied when the data were tensorized.
            uvdata.data_array[dinds, 0, :, polnum] = model * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Write fitted gain tensors back into a UVCal object in place.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object to insert gain data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    gains_re: dict with int keys and tf.Tensor object values
        dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
        representing the real component of the complex gain for antenna i.
    gains_im: dict with int keys and tf.Tensor object values
        dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
        representing the imag component of the complex gain for antenna j.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    # Locate the jones/polarization index and time index to write into.
    jones_ind = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_ind = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # Recombine real/imag parts and store one antenna row at a time.
    for antnum in range(uvcal.Nants_data):
        complex_gain = gains_re[antnum].numpy() + 1j * gains_im[antnum].numpy()
        uvcal.gain_array[antnum, 0, :, time_ind, jones_ind] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.

    Coefficients are estimated chunk-by-chunk with a linear least-squares fit of the
    modeling vectors to the (binary-weighted) data.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs: list of tf.Tensor objects
        per-chunk (nvecs, ngrps, 1, 1) tensors of initial coefficient guesses
        (trailing singleton axes satisfy later broadcasting).
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # binary masks: 1 where the weight is nonzero, 0 elsewhere.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cidx].numpy(), 0.0), dtype=wgts[cidx].dtype) for cidx in range(nchunks)
    ]
    for cidx in PBARS[notebook_progressbar](range(nchunks)):
        chunk_coeffs = []
        ngrps = data[cidx].shape[0]
        ndata = data[cidx].shape[1] * data[cidx].shape[2]
        nvecs = fg_model_comps[cidx].shape[0]
        for gidx in range(ngrps):
            # modeling vectors are zero-padded at the end; find where the padding starts
            # so the lstsq only uses the leading non-trivial vectors.
            flat_comps = fg_model_comps[cidx][:, gidx]
            zero_rows = np.where(
                np.all(np.isclose(flat_comps.numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            n_active = np.min(zero_rows) if len(zero_rows) > 0 else nvecs
            # linear least-squares fit of the active vectors to the masked data.
            design = tf.transpose(tf.reshape(flat_comps, (nvecs, ndata)))[:, :n_active]
            rhs = tf.reshape(data[cidx][gidx] * binary_wgts[cidx][gidx], (ndata, 1))
            solution = tf.reshape(tf.linalg.lstsq(design, rhs), (n_active,))
            # pad the trailing (all-zero-vector) coefficients back up to nvecs.
            chunk_coeffs.append(tf.pad(solution, [(0, nvecs - n_active)]))
        # two extra singleton axes so the coeffs broadcast against data tensors.
        fg_coeffs.append(tf.reshape(tf.transpose(tf.stack(chunk_coeffs)), (nvecs, ngrps, 1, 1)))
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelations to delay (DPSS) components with this width.
        default is 25.0.

    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    # DPSS basis over the first spectral window's frequencies, centered at 0 delay.
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            # autocorrelation: fit each integration's spectrum with smooth DPSS modes.
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                # lstsq over unflagged channels only, then evaluate the model on all channels.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # inverse-variance weight ~ 1 / (auto_i * auto_j); flagged samples get weight 0.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
    uvdata,
    fg_model_comps_dict,
    gains=None,
    freeze_model=False,
    optimizer="Adamax",
    tol=1e-14,
    maxsteps=10000,
    include_autos=False,
    verbose=False,
    sky_model=None,
    dtype=np.float32,
    use_min=False,
    use_redundancy=False,
    notebook_progressbar=False,
    correct_resid=False,
    correct_model=True,
    weights=None,
    nsamples_in_weights=True,
    graph_mode=False,
    grp_size_threshold=5,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    model_regularization="sum",
    init_guesses_from_previous_time_step=False,
    skip_threshold=0.5,
    use_model_snr_weights=False,
    **opt_kwargs,
):
    """Perform simultaneous calibration and foreground fitting using tensors.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object of data to be calibrated.
    fg_model_comps_dict: dictionary
        dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
        in the first level, each tuple represents a 'modeling group' visibilities in each
        modeling group are represented by a set of basis vectors that span all baselines in that
        group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
        'redundant group' representing visibilities that we will represent with identical component coefficients
        each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accomodates modeling
        visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
        values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
    gains: UVCal object
        UVCal with initial gain estimates.
        There many smart ways to obtain initial gain estimates
        but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
        Users can determine initial gains with their favorite established cal algorithm.
        default is None -> start with unity gains.
        WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    tol: float, optional
        halting condition for optimizer loop. Stop loop when the change in the cost function falls
        below tol.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    include_autos: bool, optional
        include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        generate lots of text.
        default is False.
    sky_model: UVData object, optional
        a sky-model to use for initial estimates of foreground coeffs and
        to set overall flux scale and phases.
        Note that this model is not used to obtain initial gain estimates.
        These must be provided through the gains argument.
    dtype: numpy dtype, optional
        the float precision to be used in tensorflow gradient descent.
        runtime scales roughly inversely linear with precision.
        default is np.float32
    use_min: bool, optional
        If True, use the set of parameters that determine minimum as the ML params
        If False, use the last set of parameters visited by the optimization loop.
    use_redundancy: bool, optional
        if true, solve for one set of foreground coeffs per redundant baseline group
        instead of per baseline.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    correct_resid: bool, optional
        if True, gain correct residual.
        default is False
    correct_model: bool, optional
        if True, gain correct model.
        default is True
    weights: UVFlag object, optional.
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is True.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    grp_size_threshold: int, optional
        modeling groups with fewer elements than this are split into single baselines.
        default is 5.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    model_regularization: str, optional
        option to regularize model
        supported 'post_hoc', 'sum'
        default is 'sum'.
        'post_hoc' sets sum of amps equal and sum of phases equal to the sky model's.
    init_guesses_from_previous_time_step: bool, optional
        if True, then use foreground coeffs and gains from previous time-step to
        initialize gains for next time step.
    skip_threshold: float, optional
        if less then this fraction of data is unflagged on a particular poltime,
        flag the entire poltime.
    use_model_snr_weights: bool, optional
        if True, reweight the data by the modeled |V|^2 (SNR-like weighting).
        default is False.
    opt_kwargs: kwarg_dict
        kwargs for tf.optimizers

    Returns
    -------
    model: UVData object
        uvdata object containing model of the foregrounds
    resid: UVData object
        uvdata object containing resids which are the data minus
        the model with gains multiplied and then with the gains divided out.
    gains: UVCal object
        uvcal object containing estimates of the gain solutions. These solutions
        are not referenced to any sky model and are likely orders of magnitude
        off from absolute calibration.
    fit_history:
        dictionary mapping polarization index -> {time index -> fit info}, where
        fit info is either the per-step loss history or the string "skipped!" for
        poltimes that did not meet skip_threshold.
    """
    antpairs_data = uvdata.get_antpairs()
    if not include_autos:
        antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
    # work on a private copy so the caller's uvdata is untouched.
    uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
    resid = copy.deepcopy(uvdata)
    model = copy.deepcopy(uvdata)
    model.data_array[:] = 0.0
    model.flag_array[:] = False
    # get redundant groups
    red_grps = []
    for fit_grp in fg_model_comps_dict.keys():
        for red_grp in fit_grp:
            red_grps.append(red_grp)
    if gains is None:
        echo(
            f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
            verbose=verbose,
        )
        gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
    if sky_model is None and model_regularization is not None:
        echo(
            f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
            verbose=verbose,
        )
        sky_model = cal_utils.apply_gains(uvdata, gains)
    else:
        # NOTE(review): if sky_model is None AND model_regularization is None this
        # branch raises AttributeError; that combination appears unsupported.
        sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
    fit_history = {}
    ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
    # generate tensors to hold foreground components.
    fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
        fg_model_comps_dict=fg_model_comps_dict,
        ants_map=ants_map,
        dtype=dtype,
        nfreqs=sky_model.Nfreqs,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        grp_size_threshold=grp_size_threshold,
    )
    echo(
        f"{datetime.datetime.now()} Finished Converting Foreground Modeling Components to Tensors...\n",
        verbose=verbose,
    )
    # delete fg_model_comps_dict. It can take up a lot of memory.
    del fg_model_comps_dict
    # loop through polarization and times.
    for polnum, pol in enumerate(uvdata.get_pols()):
        echo(
            f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
            verbose=verbose,
        )
        fit_history_p = {}
        first_time = True
        for time_index, time in enumerate(np.unique(uvdata.time_array)):
            echo(
                f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
                verbose=verbose,
            )
            bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
            frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
                uvdata.Nbls * uvdata.Nfreqs
            )
            # check that fraction of unflagged data > skip_threshold.
            if frac_unflagged >= skip_threshold:
                # rms of unflagged data, used to normalize amplitudes during optimization.
                rmsdata = np.sqrt(
                    np.mean(
                        np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
                    )
                )
                echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
                data_r, data_i, wgts = tensorize_data(
                    uvdata,
                    corr_inds=corr_inds,
                    ants_map=ants_map,
                    polarization=pol,
                    time=time,
                    data_scale_factor=rmsdata,
                    weights=weights,
                    nsamples_in_weights=nsamples_in_weights,
                    dtype=dtype,
                )
                if sky_model is not None:
                    echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
                    sky_model_r, sky_model_i, _ = tensorize_data(
                        sky_model,
                        corr_inds=corr_inds,
                        ants_map=ants_map,
                        polarization=pol,
                        time=time,
                        data_scale_factor=rmsdata,
                        weights=weights,
                        dtype=dtype,
                    )
                else:
                    sky_model_r, sky_model_i = None, None
                if first_time or not init_guesses_from_previous_time_step:
                    # (re)derive starting gains and foreground coeffs; otherwise the values
                    # carried over from the previous time step are used as initial guesses.
                    first_time = False
                    echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
                    g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
                    # generate initial guess for foreground coeffs.
                    echo(
                        f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
                        verbose=verbose,
                    )
                    fg_r = tensorize_fg_coeffs(
                        data=data_r,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                    fg_i = tensorize_fg_coeffs(
                        data=data_i,
                        wgts=wgts,
                        fg_model_comps=fg_model_comps,
                        verbose=verbose,
                        notebook_progressbar=notebook_progressbar,
                    )
                if use_model_snr_weights:
                    # weight data by modeled |V|^2 (SNR-like weighting).
                    wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
                    wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
                    del wgts_model
                    # renormalize
                    wgts_sum = np.sum([np.sum(w) for w in wgts])
                    wgts = [w / wgts_sum for w in wgts]
                (g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
                    g_r=g_r,
                    g_i=g_i,
                    fg_r=fg_r,
                    fg_i=fg_i,
                    data_r=data_r,
                    data_i=data_i,
                    wgts=wgts,
                    fg_comps=fg_model_comps,
                    corr_inds=corr_inds,
                    optimizer=optimizer,
                    use_min=use_min,
                    freeze_model=freeze_model,
                    notebook_progressbar=notebook_progressbar,
                    verbose=verbose,
                    tol=tol,
                    dtype=dtype,
                    maxsteps=maxsteps,
                    graph_mode=graph_mode,
                    n_profile_steps=n_profile_steps,
                    profile_log_dir=profile_log_dir,
                    sky_model_r=sky_model_r,
                    sky_model_i=sky_model_i,
                    model_regularization=model_regularization,
                    **opt_kwargs,
                )
                # insert into model uvdata.
                insert_model_into_uvdata_tensor(
                    uvdata=model,
                    time=time,
                    polarization=pol,
                    ants_map=ants_map,
                    red_grps=red_grps,
                    model_r=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_r,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    model_i=yield_fg_model_array(
                        fg_model_comps=fg_model_comps,
                        fg_coeffs=fg_i,
                        corr_inds=corr_inds,
                        nants=uvdata.Nants_data,
                        nfreqs=uvdata.Nfreqs,
                    ),
                    scale_factor=rmsdata,
                )
                # insert gains into uvcal
                insert_gains_into_uvcal(
                    uvcal=gains,
                    time=time,
                    polarization=pol,
                    gains_re=g_r,
                    gains_im=g_i,
                )
            else:
                echo(
                    f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
                    verbose=verbose,
                )
                flag_poltime(resid, time=time, polarization=pol)
                flag_poltime(gains, time=time, polarization=pol)
                flag_poltime(model, time=time, polarization=pol)
                # BUGFIX: record the skip per time-step. The previous
                # `fit_history[polnum] = "skipped!"` was always overwritten by the
                # `fit_history[polnum] = fit_history_p` assignment after this loop.
                fit_history_p[time_index] = "skipped!"
            # normalize on sky model if we use post-hoc regularization
            if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
                renormalize(
                    uvdata_reference_model=sky_model,
                    uvdata_deconv=model,
                    gains=gains,
                    polarization=pol,
                    time=time,
                    additional_flags=uvdata.flag_array,
                )
        fit_history[polnum] = fit_history_p
    model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
    if not correct_model:
        model = model_with_gains
    resid.data_array -= model_with_gains.data_array
    resid.data_array[model_with_gains.flag_array] = 0.0  # set resid to zero where model is flagged.
    resid.data_array[uvdata.flag_array] = 0.0  # also set resid to zero where data is flagged.
    if correct_resid:
        resid = cal_utils.apply_gains(resid, gains)
    return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag (and zero / unity-fill) a single (time, polarization) slice of a UVData or UVCal object.

    Parameters
    ----------
    data_object: UVData or UVCal object
        object to flag in place. UVData slices are zeroed; UVCal slices get unity gains.
    time: float
        JD of the time slot to flag.
    polarization: str
        polarization string of the slice to flag.

    Raises
    ------
    ValueError
        if data_object is neither a UVData nor a UVCal.
    """
    if isinstance(data_object, UVData):
        blt_selection = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_index = np.where(
            data_object.polarization_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        data_object.flag_array[blt_selection, :, :, pol_index] = True
        data_object.data_array[blt_selection, :, :, pol_index] = 0.0
    elif isinstance(data_object, UVCal):
        pol_index = np.where(
            data_object.jones_array == uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        )[0][0]
        time_index = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # flagged gains are set to unity so applying them is a no-op.
        data_object.gain_array[:, 0, :, time_index, pol_index] = 1.0
        data_object.flag_array[:, 0, :, time_index, pol_index] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and foregrounds with mixed modeling components.

    Baselines with no frequency redundancy are modeled with DPSS vectors while
    groups of baselines with some frequency redundancy are modeled jointly with
    simple_cov components.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes (unitless). default 1.
    min_dly: float, optional
        minimum delay to model with dpss models, in ns. default 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range, in ns. default 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element, in ns. default 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting. default False.
    verbose: bool, optional
        lots of text output. default False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters). default 1.0.
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some frequency redundancy;
        such baselines are modeled jointly. default 0.5.
    n_angle_bins: int, optional
        number of angular bins between -pi and pi used to compare baselines. default 200.
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar. default False.
    use_redundancy: bool, optional
        if True, model all baselines within each redundant group with the same components;
        otherwise each baseline gets separate components. default False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        use tensorflow methods to derive multi-baseline modeling components
        (recommended with a GPU with enough memory for the spectral decompositions).
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
    dtype_matinv: numpy.dtype, optional
        data type used when deriving modeling components. default np.float64.
    require_exact_angle_match: bool, optional
        require baseline angles to match within angle_match_tol when grouping.
    angle_match_tol: float, optional
        angle matching tolerance. default 1e-3.
    grp_size_threshold: int, optional
        groups with fewer elements than this are split into single baselines. default 5.
    model_comps_dict: dict, optional
        precomputed dict mapping fitting groups to numpy.ndarray
        (see modeling.yield_mixed_comps). default None -> compute automatically.
    save_dict_to: str, optional
        if not None, path to save model_comps_dict to with np.save (pickled .npy).
        default None -> do not save.
    fitting_kwargs: kwarg dict
        additional kwargs passed to calibrate_and_model_tensor (see its docstring).

    Returns
    -------
    model: UVData object
        uvdata object containing model of the intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # determine the fitting groups (baselines modeled jointly) and their uvw vectors.
    fit_groups, fit_group_blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive modeling components unless the caller supplied precomputed ones.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fit_groups,
            fit_group_blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    return calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> derive DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # BUGFIX: previously fg_model_comps_dict was accepted but ignored and the DPSS
    # components were always recomputed; now a caller-supplied dict is honored.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Evaluate the foreground model: sum of components weighted by real/imag coefficients.

    Returns a (real, imag) pair of tensors reduced over the component axis.
    """
    real_part = tf.reduce_sum(fg_comps * fg_r, axis=0)
    imag_part = tf.reduce_sum(fg_comps * fg_i, axis=0)
    return real_part, imag_part
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute the calibrated visibility model g_0 * conj(g_1) * V in real/imag parts.

    Gathers per-baseline gains via antenna index tensors, evaluates the foreground
    model V, and expands the complex product into real and imaginary components.
    """
    g_r_0 = tf.gather(g_r, ant0_inds)
    g_r_1 = tf.gather(g_r, ant1_inds)
    g_i_0 = tf.gather(g_i, ant0_inds)
    g_i_1 = tf.gather(g_i, ant1_inds)
    # cross terms of the complex gain product.
    rr = g_r_0 * g_r_1
    ii = g_i_0 * g_i_1
    ri = g_r_0 * g_i_1
    ir = g_i_0 * g_r_1
    vis_r, vis_i = fg_model(fg_r, fg_i, fg_comps)
    real_model = (rr + ii) * vis_r + (ri - ir) * vis_i
    imag_model = (ir - ri) * vis_r + (rr + ii) * vis_i
    return real_model, imag_model
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals over real and imaginary parts."""
    squared_error = tf.square(data_r - model_r) + tf.square(data_i - model_i)
    return tf.reduce_sum(squared_error * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE loss accumulated over all data chunks."""
    chunk_losses = []
    for chunk in range(nchunks):
        model_real, model_imag = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        chunk_losses.append(mse(model_real, model_imag, data_r[chunk], data_i[chunk], wgts[chunk]))
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE loss plus quadratic penalties tying the weighted model sums
    (real and imag) to prior values."""
    chunk_losses = []
    real_sums = []
    imag_sums = []
    for chunk in range(nchunks):
        model_real, model_imag = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        # weighted sums of real/imag model parts feed the regularization terms.
        real_sums.append(tf.reduce_sum(model_real * wgts[chunk]))
        imag_sums.append(tf.reduce_sum(model_imag * wgts[chunk]))
        chunk_losses.append(mse(model_real, model_imag, data_r[chunk], data_i[chunk], wgts[chunk]))
    total_loss = tf.reduce_sum(tf.stack(chunk_losses))
    reg_real = tf.square(tf.reduce_sum(tf.stack(real_sums)) - prior_r_sum)
    reg_imag = tf.square(tf.reduce_sum(tf.stack(imag_sums)) - prior_i_sum)
    return total_loss + reg_real + reg_imag
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overal phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename, str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename, str, optional
        string to pickel fitting info to (currently unused).
    x_orientation: str, optional
        x_orientation to set on the output gains. default is "east".
    clobber: bool, optional
        overwrite existing output files. default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list of ints, optional
        antennas to exclude from calibration and modeling. default None.
    select_ants: list of ints, optional
        antennas to select exclusively for calibration and modeling. default None.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        32 or 64 bit float precision for the optimization. default 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwargs for calibrate_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # accept a path, a list of paths, or an already-loaded UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # BUGFIX: apply the baseline selection to the model (previously this
        # re-selected the data object, leaving the model unselected).
        utils.select_baselines(uvd_model, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        # NOTE(review): device index derived from the last character of the device
        # name — breaks for GPU indices >= 10; confirm against deployment.
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build an argument parser for the calibration I/O options.

    Returns
    -------
    ap: argparse.ArgumentParser
        Parser with an "Input and Output Arguments." group covering data /
        model / gain file paths, output destinations, baseline-selection
        cuts, and GPU / precision settings.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overall amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a starting point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUG FIX: default was the string "False", which is truthy, so
    # `if args.clobber:` would always pass; use the boolean False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Extend the I/O parser with general gradient-descent fitting options.

    Returns
    -------
    ap: argparse.ArgumentParser
        Parser from input_output_parser() with a "General Fitting
        Arguments." group added.
    """
    ap = input_output_parser()
    sp = ap.add_argument_group("General Fitting Arguments.")
    sp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    sp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    sp.add_argument("--verbose", default=False, action="store_true", help="lots of text outputs.")
    sp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for minimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    sp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # NOTE(review): default=True combined with action="store_true" means this
    # flag can never be turned off from the command line — confirm intent.
    sp.add_argument(
        "--correct_model", default=True, action="store_true", help="Remove gain effects from foreground model."
    )
    sp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    sp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not recommended for GPUs.",
    )
    sp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    sp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    sp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more than this fraction of data is flagged.",
    )
    sp.add_argument("--model_regularization", type=str, default="post_hoc")
    sp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    sp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    sp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return ap
def dpss_fit_argparser():
    """Return the fitting argparser extended with DPSS-specific options."""
    parser = fitting_argparser()
    group = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    # All DPSS options are floats; register them from a small spec table.
    dpss_options = (
        ("--horizon", 1.0, "Fraction of horizon delay to model with DPSS modes."),
        ("--min_dly", 0.0, "Minimum delay [ns] to model with DPSS modes."),
        ("--offset", 0.0, "Offset from horizon delay [ns] to model with DPSS modes."),
    )
    for flag, default_value, message in dpss_options:
        group.add_argument(flag, default=default_value, type=float, help=message)
    return parser
|
create_cg_snapshot
|
Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
success, a cg-commit must be executed to finalize the snapshot and
unfence the flexvols.
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
# Module-level logger for this client.
LOG = logging.getLogger(__name__)
# Prefix used when renaming (soft-deleting) snapshots that are still busy.
DELETED_PREFIX = 'deleted_cinder_'
# Maximum possible LUN size in bytes (~16 TB), as a string for the ZAPI
# payload; used as the initial creation size on older ONTAP versions so the
# LUN geometry does not cap later resizes.
MAX_SIZE_FOR_A_LUN = '17555678822400'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
    def __init__(self, **kwargs):
        """Create a Data ONTAP client.

        Expects 'hostname', 'username', 'password', 'transport_type',
        'port' and 'api_trace_pattern' keyword arguments; a missing key
        raises KeyError.
        """
        host = kwargs['hostname']
        username = kwargs['username']
        password = kwargs['password']
        api_trace_pattern = kwargs['api_trace_pattern']
        # ZAPI connection used by all API-based operations.
        self.connection = netapp_api.NaServer(
            host=host,
            transport_type=kwargs['transport_type'],
            port=kwargs['port'],
            username=username,
            password=password,
            api_trace_pattern=api_trace_pattern)
        # SSH channel for operations that have no ZAPI equivalent.
        self.ssh_client = self._init_ssh_client(host, username, password)
def _init_ssh_client(self, host, username, password):
return netapp_api.SSHUtil(
host=host,
username=username,
password=password)
    def _init_features(self):
        """Set up the repository of available Data ONTAP features."""
        # Mode/version specific subclasses populate this feature set.
        self.features = na_utils.Features()
def get_ontap_version(self, cached=True):
"""Gets the ONTAP version."""
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement("system-get-version")
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = result.get_child_by_name(
'version-tuple') or netapp_api.NaElement('none')
system_version_tuple = version_tuple.get_child_by_name(
'system-version-tuple') or netapp_api.NaElement('none')
generation = system_version_tuple.get_child_content("generation")
major = system_version_tuple.get_child_content("major")
return '%(generation)s.%(major)s' % {
'generation': generation,
'major': major}
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def _strip_xml_namespace(self, string):
if string.startswith('{') and '}' in string:
return string.split('}', 1)[1]
return string
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
qos_policy_group_name=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
space_reservation = metadata['SpaceReserved']
initial_size = size
ontap_version = self.get_ontap_version()
# On older ONTAP versions the extend size is limited to its
# geometry on max_resize_size. In order to remove this
# limitation we create the LUN with its maximum possible size
# and then shrink to the requested size.
if ontap_version < '9.5':
initial_size = MAX_SIZE_FOR_A_LUN
# In order to create a LUN with its maximum size (16TB),
# the space_reservation needs to be disabled
space_reservation = 'false'
params = {'path': path, 'size': str(initial_size),
'ostype': metadata['OsType'],
'space-reservation-enabled': space_reservation}
version = self.get_ontapi_version()
if version >= (1, 110):
params['use-exact-size'] = 'true'
lun_create = netapp_api.NaElement.create_node_with_children(
'lun-create-by-size',
**params)
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s",
{'lun_name': lun_name,
'volume_name': volume_name,
'ex': ex})
if ontap_version < '9.5':
self.do_direct_resize(path, six.text_type(size))
if metadata['SpaceReserved'] == 'true':
self.set_lun_space_reservation(path, True)
    def set_lun_space_reservation(self, path, flag):
        """Sets the LUN space reservation on ONTAP.

        :param path: full LUN path, e.g. '/vol/<volume>/<lun>'.
        :param flag: bool; whether space reservation should be enabled.
        """
        # NOTE(review): str(flag) yields 'True'/'False' (capitalized);
        # presumably ZAPI boolean parsing is case-insensitive — confirm.
        lun_modify_space_reservation = (
            netapp_api.NaElement.create_node_with_children(
                'lun-set-space-reservation-info', **{
                    'path': path,
                    'enable': str(flag)}))
        self.connection.invoke_successfully(lun_modify_space_reservation, True)
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s", seg[-1])
    def map_lun(self, path, igroup_name, lun_id=None):
        """Maps LUN to the initiator and returns LUN id assigned.

        :param path: full LUN path.
        :param igroup_name: initiator group to map the LUN to.
        :param lun_id: optional explicit LUN id; when omitted ONTAP assigns
            one.
        :returns: the LUN id assigned by ONTAP (string content).
        """
        lun_map = netapp_api.NaElement.create_node_with_children(
            'lun-map', **{'path': path,
                          'initiator-group': igroup_name})
        # NOTE(review): this truthiness check would skip an integer lun_id
        # of 0; callers appear to pass strings or None — confirm before
        # changing to `is not None`.
        if lun_id:
            lun_map.add_new_child('lun-id', lun_id)
        try:
            result = self.connection.invoke_successfully(lun_map, True)
            return result.get_child_content('lun-id-assigned')
        except netapp_api.NaApiError as e:
            code = e.code
            message = e.message
            LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
                        '%(message)s', {'code': code, 'message': message})
            raise
    def unmap_lun(self, path, igroup_name):
        """Unmaps a LUN from given initiator.

        Errors indicating the LUN is already unmapped are swallowed;
        any other API error is re-raised with its original traceback.
        """
        lun_unmap = netapp_api.NaElement.create_node_with_children(
            'lun-unmap',
            **{'path': path, 'initiator-group': igroup_name})
        try:
            self.connection.invoke_successfully(lun_unmap, True)
        except netapp_api.NaApiError as e:
            # Capture now so six.reraise preserves the original traceback.
            exc_info = sys.exc_info()
            LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
                        "%(message)s", {'code': e.code,
                                        'message': e.message})
            # if the LUN is already unmapped
            if e.code == '13115' or e.code == '9016':
                pass
            else:
                six.reraise(*exc_info)
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = netapp_api.NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = netapp_api.NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
"""Resize the LUN."""
seg = path.split("/")
LOG.info("Resizing LUN %s directly to new size.", seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True)
    def get_lun_geometry(self, path):
        """Gets the LUN geometry.

        :param path: full LUN path.
        :returns: dict with 'size', 'bytes_per_sector', 'sectors_per_track',
            'tracks_per_cylinder', 'cylinders' and 'max_resize' keys (string
            values from ONTAP); empty or partial if the call fails.
        """
        geometry = {}
        lun_geo = netapp_api.NaElement("lun-get-geometry")
        lun_geo.add_new_child('path', path)
        try:
            result = self.connection.invoke_successfully(lun_geo, True)
            geometry['size'] = result.get_child_content("size")
            geometry['bytes_per_sector'] = result.get_child_content(
                "bytes-per-sector")
            geometry['sectors_per_track'] = result.get_child_content(
                "sectors-per-track")
            geometry['tracks_per_cylinder'] = result.get_child_content(
                "tracks-per-cylinder")
            geometry['cylinders'] = result.get_child_content("cylinders")
            geometry['max_resize'] = result.get_child_content(
                "max-resize-size")
        except Exception as e:
            # Deliberately best-effort: log the failure and return whatever
            # was gathered so callers can degrade gracefully.
            LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
                      {'path': path, 'msg': six.text_type(e)})
        return geometry
def get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = netapp_api.NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def move_lun(self, path, new_path):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
{'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.connection.invoke_successfully(lun_move, True)
    # NOTE(review): the operations below look protocol/mode specific and
    # are presumably implemented by subclasses (7-mode vs. cluster-mode
    # clients) — confirm against the concrete client modules.
    def get_iscsi_target_details(self):
        """Gets the iSCSI target portal details."""
        raise NotImplementedError()
    def get_fc_target_wwpns(self):
        """Gets the FC target details."""
        raise NotImplementedError()
    def get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        raise NotImplementedError()
    def check_iscsi_initiator_exists(self, iqn):
        """Returns True if initiator exists."""
        raise NotImplementedError()
    def set_iscsi_chap_authentication(self, iqn, username, password):
        """Provides NetApp host's CHAP credentials to the backend."""
        raise NotImplementedError()
    def get_lun_list(self):
        """Gets the list of LUNs on filer."""
        raise NotImplementedError()
    def get_igroup_by_initiators(self, initiator_list):
        """Get igroups exactly matching a set of initiators."""
        raise NotImplementedError()
def _has_luns_mapped_to_initiator(self, initiator):
"""Checks whether any LUNs are mapped to the given initiator."""
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = result.get_child_by_name(
'lun-maps') or netapp_api.NaElement('none')
return len(lun_maps_container.get_children()) > 0
def has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False
    def get_lun_by_args(self, **args):
        """Retrieves LUNs with specified args."""
        # NOTE(review): presumably implemented by mode-specific subclasses.
        raise NotImplementedError()
    def get_performance_counter_info(self, object_name, counter_name):
        """Gets info about one or more Data ONTAP performance counters.

        :param object_name: name of the performance object.
        :param counter_name: counter to look up within that object.
        :returns: dict with 'name', 'labels' and 'base-counter' keys.
        :raises exception.NotFound: if the counter does not exist.
        """
        api_args = {'objectname': object_name}
        result = self.connection.send_request('perf-object-counter-list-info',
                                              api_args,
                                              enable_tunneling=False)
        counters = result.get_child_by_name(
            'counters') or netapp_api.NaElement('None')
        for counter in counters.get_children():
            if counter.get_child_content('name') == counter_name:
                labels = []
                label_list = counter.get_child_by_name(
                    'labels') or netapp_api.NaElement('None')
                for label in label_list.get_children():
                    # Each label element may pack several comma-separated
                    # labels into one content string.
                    labels.extend(label.get_content().split(','))
                base_counter = counter.get_child_content('base-counter')
                return {
                    'name': counter_name,
                    'labels': labels,
                    'base-counter': base_counter,
                }
        else:
            # for/else: reached only when the loop finished without a match
            # (a match returns from inside the loop).
            raise exception.NotFound(_('Counter %s not found') % counter_name)
def delete_snapshot(self, volume_name, snapshot_name):
"""Deletes a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args)
# MASKED: create_cg_snapshot function (lines 367-379)
def _start_cg_snapshot(self, volume_names, snapshot_name):
snapshot_init = {
'snapshot': snapshot_name,
'timeout': 'relaxed',
'volumes': [
{'volume-name': volume_name} for volume_name in volume_names
],
}
result = self.connection.send_request('cg-start', snapshot_init)
return result.get_child_content('cg-id')
def _commit_cg_snapshot(self, cg_id):
snapshot_commit = {'cg-id': cg_id}
self.connection.send_request('cg-commit', snapshot_commit)
    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot."""
        # NOTE(review): presumably implemented by mode-specific subclasses;
        # wait_for_busy_snapshot relies on the returned dict having 'busy'.
        raise NotImplementedError()
    @utils.retry(exception.SnapshotIsBusy)
    def wait_for_busy_snapshot(self, flexvol, snapshot_name):
        """Checks for and handles a busy snapshot.
        If a snapshot is busy, for reasons other than cloning, an exception is
        raised immediately. Otherwise, wait for a period of time for the clone
        dependency to finish before giving up. If the snapshot is not busy then
        no action is taken and the method exits.
        """
        snapshot = self.get_snapshot(flexvol, snapshot_name)
        if not snapshot['busy']:
            LOG.debug("Backing consistency group snapshot %s available for "
                      "deletion.", snapshot_name)
            return
        else:
            LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
                      "for volume clone dependency to clear.",
                      {"snap": snapshot_name, "vol": flexvol})
            # Raising here triggers the @utils.retry decorator, which
            # re-invokes this method until the snapshot is no longer busy
            # or the retries are exhausted.
            raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
    def mark_snapshot_for_deletion(self, volume, snapshot_name):
        """Mark snapshot for deletion by renaming snapshot."""
        # A busy snapshot cannot be removed immediately; prefixing its name
        # with DELETED_PREFIX lets a later cleanup pass delete it.
        return self.rename_snapshot(
            volume, snapshot_name, DELETED_PREFIX + snapshot_name)
def rename_snapshot(self, volume, current_name, new_name):
"""Renames a snapshot."""
api_args = {
'volume': volume,
'current-name': current_name,
'new-name': new_name,
}
return self.connection.send_request('snapshot-rename', api_args)
|
def create_cg_snapshot(self, volume_names, snapshot_name):
"""Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
success, a cg-commit must be executed to finalized the snapshot and
unfence the flexvols.
"""
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if not cg_id:
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
self._commit_cg_snapshot(cg_id)
| 367 | 379 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
MAX_SIZE_FOR_A_LUN = '17555678822400'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
def __init__(self, **kwargs):
host = kwargs['hostname']
username = kwargs['username']
password = kwargs['password']
api_trace_pattern = kwargs['api_trace_pattern']
self.connection = netapp_api.NaServer(
host=host,
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=username,
password=password,
api_trace_pattern=api_trace_pattern)
self.ssh_client = self._init_ssh_client(host, username, password)
def _init_ssh_client(self, host, username, password):
return netapp_api.SSHUtil(
host=host,
username=username,
password=password)
def _init_features(self):
"""Set up the repository of available Data ONTAP features."""
self.features = na_utils.Features()
def get_ontap_version(self, cached=True):
"""Gets the ONTAP version."""
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement("system-get-version")
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = result.get_child_by_name(
'version-tuple') or netapp_api.NaElement('none')
system_version_tuple = version_tuple.get_child_by_name(
'system-version-tuple') or netapp_api.NaElement('none')
generation = system_version_tuple.get_child_content("generation")
major = system_version_tuple.get_child_content("major")
return '%(generation)s.%(major)s' % {
'generation': generation,
'major': major}
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def _strip_xml_namespace(self, string):
if string.startswith('{') and '}' in string:
return string.split('}', 1)[1]
return string
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
qos_policy_group_name=None):
"""Issues API request for creating LUN on volume."""
path = '/vol/%s/%s' % (volume_name, lun_name)
space_reservation = metadata['SpaceReserved']
initial_size = size
ontap_version = self.get_ontap_version()
# On older ONTAP versions the extend size is limited to its
# geometry on max_resize_size. In order to remove this
# limitation we create the LUN with its maximum possible size
# and then shrink to the requested size.
if ontap_version < '9.5':
initial_size = MAX_SIZE_FOR_A_LUN
# In order to create a LUN with its maximum size (16TB),
# the space_reservation needs to be disabled
space_reservation = 'false'
params = {'path': path, 'size': str(initial_size),
'ostype': metadata['OsType'],
'space-reservation-enabled': space_reservation}
version = self.get_ontapi_version()
if version >= (1, 110):
params['use-exact-size'] = 'true'
lun_create = netapp_api.NaElement.create_node_with_children(
'lun-create-by-size',
**params)
if qos_policy_group_name:
lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
try:
self.connection.invoke_successfully(lun_create, True)
except netapp_api.NaApiError as ex:
with excutils.save_and_reraise_exception():
LOG.error("Error provisioning volume %(lun_name)s on "
"%(volume_name)s. Details: %(ex)s",
{'lun_name': lun_name,
'volume_name': volume_name,
'ex': ex})
if ontap_version < '9.5':
self.do_direct_resize(path, six.text_type(size))
if metadata['SpaceReserved'] == 'true':
self.set_lun_space_reservation(path, True)
def set_lun_space_reservation(self, path, flag):
"""Sets the LUN space reservation on ONTAP."""
lun_modify_space_reservation = (
netapp_api.NaElement.create_node_with_children(
'lun-set-space-reservation-info', **{
'path': path,
'enable': str(flag)}))
self.connection.invoke_successfully(lun_modify_space_reservation, True)
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
"""Maps LUN to the initiator and returns LUN id assigned."""
lun_map = netapp_api.NaElement.create_node_with_children(
'lun-map', **{'path': path,
'initiator-group': igroup_name})
if lun_id:
lun_map.add_new_child('lun-id', lun_id)
try:
result = self.connection.invoke_successfully(lun_map, True)
return result.get_child_content('lun-id-assigned')
except netapp_api.NaApiError as e:
code = e.code
message = e.message
LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
'%(message)s', {'code': code, 'message': message})
raise
def unmap_lun(self, path, igroup_name):
"""Unmaps a LUN from given initiator."""
lun_unmap = netapp_api.NaElement.create_node_with_children(
'lun-unmap',
**{'path': path, 'initiator-group': igroup_name})
try:
self.connection.invoke_successfully(lun_unmap, True)
except netapp_api.NaApiError as e:
exc_info = sys.exc_info()
LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
"%(message)s", {'code': e.code,
'message': e.message})
# if the LUN is already unmapped
if e.code == '13115' or e.code == '9016':
pass
else:
six.reraise(*exc_info)
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = netapp_api.NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = netapp_api.NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
"""Resize the LUN."""
seg = path.split("/")
LOG.info("Resizing LUN %s directly to new size.", seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
"""Gets the LUN geometry."""
geometry = {}
lun_geo = netapp_api.NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] = result.get_child_content(
"bytes-per-sector")
geometry['sectors_per_track'] = result.get_child_content(
"sectors-per-track")
geometry['tracks_per_cylinder'] = result.get_child_content(
"tracks-per-cylinder")
geometry['cylinders'] = result.get_child_content("cylinders")
geometry['max_resize'] = result.get_child_content(
"max-resize-size")
except Exception as e:
LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
{'path': path, 'msg': six.text_type(e)})
return geometry
def get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = netapp_api.NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def move_lun(self, path, new_path):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
{'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.connection.invoke_successfully(lun_move, True)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
raise NotImplementedError()
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
raise NotImplementedError()
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
raise NotImplementedError()
def check_iscsi_initiator_exists(self, iqn):
"""Returns True if initiator exists."""
raise NotImplementedError()
def set_iscsi_chap_authentication(self, iqn, username, password):
"""Provides NetApp host's CHAP credentials to the backend."""
raise NotImplementedError()
def get_lun_list(self):
"""Gets the list of LUNs on filer."""
raise NotImplementedError()
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
raise NotImplementedError()
def _has_luns_mapped_to_initiator(self, initiator):
"""Checks whether any LUNs are mapped to the given initiator."""
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = result.get_child_by_name(
'lun-maps') or netapp_api.NaElement('none')
return len(lun_maps_container.get_children()) > 0
def has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False
def get_lun_by_args(self, **args):
"""Retrieves LUNs with specified args."""
raise NotImplementedError()
def get_performance_counter_info(self, object_name, counter_name):
"""Gets info about one or more Data ONTAP performance counters."""
api_args = {'objectname': object_name}
result = self.connection.send_request('perf-object-counter-list-info',
api_args,
enable_tunneling=False)
counters = result.get_child_by_name(
'counters') or netapp_api.NaElement('None')
for counter in counters.get_children():
if counter.get_child_content('name') == counter_name:
labels = []
label_list = counter.get_child_by_name(
'labels') or netapp_api.NaElement('None')
for label in label_list.get_children():
labels.extend(label.get_content().split(','))
base_counter = counter.get_child_content('base-counter')
return {
'name': counter_name,
'labels': labels,
'base-counter': base_counter,
}
else:
raise exception.NotFound(_('Counter %s not found') % counter_name)
def delete_snapshot(self, volume_name, snapshot_name):
"""Deletes a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args)
def create_cg_snapshot(self, volume_names, snapshot_name):
"""Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
success, a cg-commit must be executed to finalized the snapshot and
unfence the flexvols.
"""
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if not cg_id:
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
self._commit_cg_snapshot(cg_id)
def _start_cg_snapshot(self, volume_names, snapshot_name):
    """Fence the flexvols and open a CG snapshot; return its cg-id."""
    volumes = [{'volume-name': name} for name in volume_names]
    api_args = {
        'snapshot': snapshot_name,
        'timeout': 'relaxed',
        'volumes': volumes,
    }
    result = self.connection.send_request('cg-start', api_args)
    return result.get_child_content('cg-id')
def _commit_cg_snapshot(self, cg_id):
    # Finalize the snapshot opened by cg-start and unfence the flexvols.
    snapshot_commit = {'cg-id': cg_id}
    self.connection.send_request('cg-commit', snapshot_commit)
def get_snapshot(self, volume_name, snapshot_name):
    """Gets a single snapshot.

    Interface hook only; presumably overridden by a version-specific
    subclass. Callers (wait_for_busy_snapshot) expect the returned
    mapping to carry at least a 'busy' key.
    """
    raise NotImplementedError()
@utils.retry(exception.SnapshotIsBusy)
def wait_for_busy_snapshot(self, flexvol, snapshot_name):
    """Checks for and handles a busy snapshot.

    If a snapshot is busy, for reasons other than cloning, an exception
    is raised immediately. Otherwise, wait for a period of time for the
    clone dependency to finish before giving up. If the snapshot is not
    busy then no action is taken and the method exits.
    """
    snapshot = self.get_snapshot(flexvol, snapshot_name)
    if snapshot['busy']:
        # Raising SnapshotIsBusy trips the @utils.retry decorator above,
        # which re-invokes this method until the clone dependency clears
        # or the retries are exhausted.
        LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
                  "for volume clone dependency to clear.",
                  {"snap": snapshot_name, "vol": flexvol})
        raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
    LOG.debug("Backing consistency group snapshot %s available for "
              "deletion.", snapshot_name)
def mark_snapshot_for_deletion(self, volume, snapshot_name):
    """Mark snapshot for deletion by renaming snapshot."""
    deleted_name = DELETED_PREFIX + snapshot_name
    return self.rename_snapshot(volume, snapshot_name, deleted_name)
def rename_snapshot(self, volume, current_name, new_name):
    """Renames a snapshot."""
    return self.connection.send_request(
        'snapshot-rename',
        {'volume': volume,
         'current-name': current_name,
         'new-name': new_name})
|
wait_for_busy_snapshot
|
Checks for and handles a busy snapshot.
If a snapshot is busy, for reasons other than cloning, an exception is
raised immediately. Otherwise, wait for a period of time for the clone
dependency to finish before giving up. If the snapshot is not busy then
no action is taken and the method exits.
|
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
MAX_SIZE_FOR_A_LUN = '17555678822400'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
def __init__(self, **kwargs):
host = kwargs['hostname']
username = kwargs['username']
password = kwargs['password']
api_trace_pattern = kwargs['api_trace_pattern']
self.connection = netapp_api.NaServer(
host=host,
transport_type=kwargs['transport_type'],
port=kwargs['port'],
username=username,
password=password,
api_trace_pattern=api_trace_pattern)
self.ssh_client = self._init_ssh_client(host, username, password)
def _init_ssh_client(self, host, username, password):
return netapp_api.SSHUtil(
host=host,
username=username,
password=password)
def _init_features(self):
"""Set up the repository of available Data ONTAP features."""
self.features = na_utils.Features()
def get_ontap_version(self, cached=True):
"""Gets the ONTAP version."""
if cached:
return self.connection.get_ontap_version()
ontap_version = netapp_api.NaElement("system-get-version")
result = self.connection.invoke_successfully(ontap_version, True)
version_tuple = result.get_child_by_name(
'version-tuple') or netapp_api.NaElement('none')
system_version_tuple = version_tuple.get_child_by_name(
'system-version-tuple') or netapp_api.NaElement('none')
generation = system_version_tuple.get_child_content("generation")
major = system_version_tuple.get_child_content("major")
return '%(generation)s.%(major)s' % {
'generation': generation,
'major': major}
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
if cached:
return self.connection.get_api_version()
ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
res = self.connection.invoke_successfully(ontapi_version, False)
major = res.get_child_content('major-version')
minor = res.get_child_content('minor-version')
return major, minor
def _strip_xml_namespace(self, string):
if string.startswith('{') and '}' in string:
return string.split('}', 1)[1]
return string
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
               qos_policy_group_name=None):
    """Issues API request for creating LUN on volume.

    :param volume_name: flexvol that will contain the LUN
    :param lun_name: name of the new LUN
    :param size: requested LUN size in bytes
    :param metadata: dict carrying at least 'OsType' and 'SpaceReserved'
    :param qos_policy_group_name: optional QoS policy group to attach
    """
    path = '/vol/%s/%s' % (volume_name, lun_name)
    space_reservation = metadata['SpaceReserved']
    initial_size = size

    # BUG FIX: the version check must be numeric. A plain string
    # comparison ('9.10' < '9.5' is True lexicographically) would make
    # ONTAP 9.10+ take the legacy pre-9.5 path. Non-numeric fragments
    # are dropped, which conservatively falls back to the legacy path.
    version_parts = self.get_ontap_version().split('.')
    ontap_version = tuple(int(part) for part in version_parts[:2]
                          if part.isdigit())
    pre_9_5 = ontap_version < (9, 5)

    # On older ONTAP versions the extend size is limited to its
    # geometry on max_resize_size. In order to remove this
    # limitation we create the LUN with its maximum possible size
    # and then shrink to the requested size.
    if pre_9_5:
        initial_size = MAX_SIZE_FOR_A_LUN
        # In order to create a LUN with its maximum size (16TB),
        # the space_reservation needs to be disabled
        space_reservation = 'false'

    params = {'path': path, 'size': str(initial_size),
              'ostype': metadata['OsType'],
              'space-reservation-enabled': space_reservation}
    version = self.get_ontapi_version()
    if version >= (1, 110):
        params['use-exact-size'] = 'true'
    lun_create = netapp_api.NaElement.create_node_with_children(
        'lun-create-by-size',
        **params)
    if qos_policy_group_name:
        lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
    try:
        self.connection.invoke_successfully(lun_create, True)
    except netapp_api.NaApiError as ex:
        with excutils.save_and_reraise_exception():
            LOG.error("Error provisioning volume %(lun_name)s on "
                      "%(volume_name)s. Details: %(ex)s",
                      {'lun_name': lun_name,
                       'volume_name': volume_name,
                       'ex': ex})
    if pre_9_5:
        # Shrink the max-size LUN down to the size actually requested,
        # then restore the reservation we disabled above if asked for.
        self.do_direct_resize(path, six.text_type(size))
        if metadata['SpaceReserved'] == 'true':
            self.set_lun_space_reservation(path, True)
def set_lun_space_reservation(self, path, flag):
"""Sets the LUN space reservation on ONTAP."""
lun_modify_space_reservation = (
netapp_api.NaElement.create_node_with_children(
'lun-set-space-reservation-info', **{
'path': path,
'enable': str(flag)}))
self.connection.invoke_successfully(lun_modify_space_reservation, True)
def destroy_lun(self, path, force=True):
"""Destroys the LUN at the path."""
lun_destroy = netapp_api.NaElement.create_node_with_children(
'lun-destroy',
**{'path': path})
if force:
lun_destroy.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_destroy, True)
seg = path.split("/")
LOG.debug("Destroyed LUN %s", seg[-1])
def map_lun(self, path, igroup_name, lun_id=None):
    """Maps LUN to the initiator and returns LUN id assigned.

    :param path: full LUN path, e.g. /vol/<flexvol>/<lun>
    :param igroup_name: initiator group to map the LUN to
    :param lun_id: explicit LUN id to request; None lets ONTAP choose
    """
    lun_map = netapp_api.NaElement.create_node_with_children(
        'lun-map', **{'path': path,
                      'initiator-group': igroup_name})
    # BUG FIX: 'if lun_id:' silently discarded an explicit id of 0,
    # which is a valid LUN id; only None means "let ONTAP pick".
    if lun_id is not None:
        lun_map.add_new_child('lun-id', lun_id)
    try:
        result = self.connection.invoke_successfully(lun_map, True)
        return result.get_child_content('lun-id-assigned')
    except netapp_api.NaApiError as e:
        LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
                    '%(message)s', {'code': e.code, 'message': e.message})
        raise
def unmap_lun(self, path, igroup_name):
    """Unmaps a LUN from given initiator.

    API errors indicating the LUN is already unmapped are swallowed,
    since that is the desired end state; anything else is re-raised.
    """
    lun_unmap = netapp_api.NaElement.create_node_with_children(
        'lun-unmap',
        **{'path': path, 'initiator-group': igroup_name})
    try:
        self.connection.invoke_successfully(lun_unmap, True)
    except netapp_api.NaApiError as e:
        LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
                    "%(message)s", {'code': e.code,
                                    'message': e.message})
        # if the LUN is already unmapped
        if e.code not in ('13115', '9016'):
            # A bare re-raise preserves the original traceback; the
            # sys.exc_info()/six.reraise() dance is unnecessary on
            # Python 3.
            raise
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
"""Creates igroup with specified args."""
igroup_create = netapp_api.NaElement.create_node_with_children(
'igroup-create',
**{'initiator-group-name': igroup,
'initiator-group-type': igroup_type,
'os-type': os_type})
self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
"""Adds initiators to the specified igroup."""
igroup_add = netapp_api.NaElement.create_node_with_children(
'igroup-add',
**{'initiator-group-name': igroup,
'initiator': initiator})
self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
"""Resize the LUN."""
seg = path.split("/")
LOG.info("Resizing LUN %s directly to new size.", seg[-1])
lun_resize = netapp_api.NaElement.create_node_with_children(
'lun-resize',
**{'path': path,
'size': new_size_bytes})
if force:
lun_resize.add_new_child('force', 'true')
self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
"""Gets the LUN geometry."""
geometry = {}
lun_geo = netapp_api.NaElement("lun-get-geometry")
lun_geo.add_new_child('path', path)
try:
result = self.connection.invoke_successfully(lun_geo, True)
geometry['size'] = result.get_child_content("size")
geometry['bytes_per_sector'] = result.get_child_content(
"bytes-per-sector")
geometry['sectors_per_track'] = result.get_child_content(
"sectors-per-track")
geometry['tracks_per_cylinder'] = result.get_child_content(
"tracks-per-cylinder")
geometry['cylinders'] = result.get_child_content("cylinders")
geometry['max_resize'] = result.get_child_content(
"max-resize-size")
except Exception as e:
LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
{'path': path, 'msg': six.text_type(e)})
return geometry
def get_volume_options(self, volume_name):
"""Get the value for the volume option."""
opts = []
vol_option_list = netapp_api.NaElement("volume-options-list-info")
vol_option_list.add_new_child('volume', volume_name)
result = self.connection.invoke_successfully(vol_option_list, True)
options = result.get_child_by_name("options")
if options:
opts = options.get_children()
return opts
def move_lun(self, path, new_path):
"""Moves the LUN at path to new path."""
seg = path.split("/")
new_seg = new_path.split("/")
LOG.debug("Moving LUN %(name)s to %(new_name)s.",
{'name': seg[-1], 'new_name': new_seg[-1]})
lun_move = netapp_api.NaElement("lun-move")
lun_move.add_new_child("path", path)
lun_move.add_new_child("new-path", new_path)
self.connection.invoke_successfully(lun_move, True)
def get_iscsi_target_details(self):
"""Gets the iSCSI target portal details."""
raise NotImplementedError()
def get_fc_target_wwpns(self):
"""Gets the FC target details."""
raise NotImplementedError()
def get_iscsi_service_details(self):
"""Returns iscsi iqn."""
raise NotImplementedError()
def check_iscsi_initiator_exists(self, iqn):
"""Returns True if initiator exists."""
raise NotImplementedError()
def set_iscsi_chap_authentication(self, iqn, username, password):
"""Provides NetApp host's CHAP credentials to the backend."""
raise NotImplementedError()
def get_lun_list(self):
"""Gets the list of LUNs on filer."""
raise NotImplementedError()
def get_igroup_by_initiators(self, initiator_list):
"""Get igroups exactly matching a set of initiators."""
raise NotImplementedError()
def _has_luns_mapped_to_initiator(self, initiator):
"""Checks whether any LUNs are mapped to the given initiator."""
lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
lun_list_api.add_new_child('initiator', initiator)
result = self.connection.invoke_successfully(lun_list_api, True)
lun_maps_container = result.get_child_by_name(
'lun-maps') or netapp_api.NaElement('none')
return len(lun_maps_container.get_children()) > 0
def has_luns_mapped_to_initiators(self, initiator_list):
"""Checks whether any LUNs are mapped to the given initiator(s)."""
for initiator in initiator_list:
if self._has_luns_mapped_to_initiator(initiator):
return True
return False
def get_lun_by_args(self, **args):
"""Retrieves LUNs with specified args."""
raise NotImplementedError()
def get_performance_counter_info(self, object_name, counter_name):
"""Gets info about one or more Data ONTAP performance counters."""
api_args = {'objectname': object_name}
result = self.connection.send_request('perf-object-counter-list-info',
api_args,
enable_tunneling=False)
counters = result.get_child_by_name(
'counters') or netapp_api.NaElement('None')
for counter in counters.get_children():
if counter.get_child_content('name') == counter_name:
labels = []
label_list = counter.get_child_by_name(
'labels') or netapp_api.NaElement('None')
for label in label_list.get_children():
labels.extend(label.get_content().split(','))
base_counter = counter.get_child_content('base-counter')
return {
'name': counter_name,
'labels': labels,
'base-counter': base_counter,
}
else:
raise exception.NotFound(_('Counter %s not found') % counter_name)
def delete_snapshot(self, volume_name, snapshot_name):
"""Deletes a volume snapshot."""
api_args = {'volume': volume_name, 'snapshot': snapshot_name}
self.connection.send_request('snapshot-delete', api_args)
def create_cg_snapshot(self, volume_names, snapshot_name):
"""Creates a consistency group snapshot out of one or more flexvols.
ONTAP requires an invocation of cg-start to first fence off the
flexvols to be included in the snapshot. If cg-start returns
success, a cg-commit must be executed to finalized the snapshot and
unfence the flexvols.
"""
cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
if not cg_id:
msg = _('Could not start consistency group snapshot %s.')
raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
self._commit_cg_snapshot(cg_id)
def _start_cg_snapshot(self, volume_names, snapshot_name):
snapshot_init = {
'snapshot': snapshot_name,
'timeout': 'relaxed',
'volumes': [
{'volume-name': volume_name} for volume_name in volume_names
],
}
result = self.connection.send_request('cg-start', snapshot_init)
return result.get_child_content('cg-id')
def _commit_cg_snapshot(self, cg_id):
snapshot_commit = {'cg-id': cg_id}
self.connection.send_request('cg-commit', snapshot_commit)
def get_snapshot(self, volume_name, snapshot_name):
"""Gets a single snapshot."""
raise NotImplementedError()
# MASKED: wait_for_busy_snapshot function (lines 400-418)
def mark_snapshot_for_deletion(self, volume, snapshot_name):
"""Mark snapshot for deletion by renaming snapshot."""
return self.rename_snapshot(
volume, snapshot_name, DELETED_PREFIX + snapshot_name)
def rename_snapshot(self, volume, current_name, new_name):
"""Renames a snapshot."""
api_args = {
'volume': volume,
'current-name': current_name,
'new-name': new_name,
}
return self.connection.send_request('snapshot-rename', api_args)
|
@utils.retry(exception.SnapshotIsBusy)
def wait_for_busy_snapshot(self, flexvol, snapshot_name):
    """Checks for and handles a busy snapshot.

    If a snapshot is busy, for reasons other than cloning, an exception
    is raised immediately. Otherwise, wait for a period of time for the
    clone dependency to finish before giving up. If the snapshot is not
    busy then no action is taken and the method exits.
    """
    snapshot = self.get_snapshot(flexvol, snapshot_name)
    if snapshot['busy']:
        # Raising SnapshotIsBusy trips the @utils.retry decorator above,
        # which re-invokes this method until the clone dependency clears
        # or the retries are exhausted.
        LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
                  "for volume clone dependency to clear.",
                  {"snap": snapshot_name, "vol": flexvol})
        raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
    LOG.debug("Backing consistency group snapshot %s available for "
              "deletion.", snapshot_name)
| 400 | 418 |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
MAX_SIZE_FOR_A_LUN = '17555678822400'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
def __init__(self, **kwargs):
    """Build the ZAPI connection and SSH helper from driver kwargs.

    Expects 'hostname', 'username', 'password', 'transport_type',
    'port' and 'api_trace_pattern' keys in *kwargs*.
    """
    host = kwargs['hostname']
    username = kwargs['username']
    password = kwargs['password']
    self.connection = netapp_api.NaServer(
        host=host,
        transport_type=kwargs['transport_type'],
        port=kwargs['port'],
        username=username,
        password=password,
        api_trace_pattern=kwargs['api_trace_pattern'])
    self.ssh_client = self._init_ssh_client(host, username, password)
def _init_ssh_client(self, host, username, password):
    # Build the SSH helper for this filer with the same credentials as
    # the ZAPI connection; factored out so it can be overridden/stubbed.
    return netapp_api.SSHUtil(
        host=host,
        username=username,
        password=password)
def _init_features(self):
    """Set up the repository of available Data ONTAP features."""
    # Starts empty; presumably populated by version-specific subclasses
    # (not visible in this module).
    self.features = na_utils.Features()
def get_ontap_version(self, cached=True):
    """Return the ONTAP release as a '<generation>.<major>' string.

    With cached=True (the default) the value recorded on the connection
    is returned; otherwise system-get-version is queried directly.
    """
    if cached:
        return self.connection.get_ontap_version()
    query = netapp_api.NaElement("system-get-version")
    result = self.connection.invoke_successfully(query, True)
    empty = netapp_api.NaElement('none')
    version_tuple = result.get_child_by_name('version-tuple') or empty
    sys_tuple = (version_tuple.get_child_by_name('system-version-tuple')
                 or empty)
    generation = sys_tuple.get_child_content("generation")
    major = sys_tuple.get_child_content("major")
    return '%(generation)s.%(major)s' % {
        'generation': generation,
        'major': major}
def get_ontapi_version(self, cached=True):
    """Return the supported ONTAPI version as a (major, minor) pair."""
    if cached:
        return self.connection.get_api_version()
    query = netapp_api.NaElement('system-get-ontapi-version')
    response = self.connection.invoke_successfully(query, False)
    return (response.get_child_content('major-version'),
            response.get_child_content('minor-version'))
def _strip_xml_namespace(self, string):
    # '{uri}tag' -> 'tag'; any other shape is returned untouched.
    if string.startswith('{') and '}' in string:
        _, _, local_name = string.partition('}')
        return local_name
    return string
def check_is_naelement(self, elem):
    """Checks if object is instance of NaElement.

    :raises ValueError: if *elem* is not a netapp_api.NaElement.
    """
    if not isinstance(elem, netapp_api.NaElement):
        raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
               qos_policy_group_name=None):
    """Issues API request for creating LUN on volume.

    :param volume_name: flexvol that will contain the LUN
    :param lun_name: name of the new LUN
    :param size: requested LUN size in bytes
    :param metadata: dict carrying at least 'OsType' and 'SpaceReserved'
    :param qos_policy_group_name: optional QoS policy group to attach
    """
    path = '/vol/%s/%s' % (volume_name, lun_name)
    space_reservation = metadata['SpaceReserved']
    initial_size = size

    # BUG FIX: the version check must be numeric. A plain string
    # comparison ('9.10' < '9.5' is True lexicographically) would make
    # ONTAP 9.10+ take the legacy pre-9.5 path. Non-numeric fragments
    # are dropped, which conservatively falls back to the legacy path.
    version_parts = self.get_ontap_version().split('.')
    ontap_version = tuple(int(part) for part in version_parts[:2]
                          if part.isdigit())
    pre_9_5 = ontap_version < (9, 5)

    # On older ONTAP versions the extend size is limited to its
    # geometry on max_resize_size. In order to remove this
    # limitation we create the LUN with its maximum possible size
    # and then shrink to the requested size.
    if pre_9_5:
        initial_size = MAX_SIZE_FOR_A_LUN
        # In order to create a LUN with its maximum size (16TB),
        # the space_reservation needs to be disabled
        space_reservation = 'false'

    params = {'path': path, 'size': str(initial_size),
              'ostype': metadata['OsType'],
              'space-reservation-enabled': space_reservation}
    version = self.get_ontapi_version()
    if version >= (1, 110):
        params['use-exact-size'] = 'true'
    lun_create = netapp_api.NaElement.create_node_with_children(
        'lun-create-by-size',
        **params)
    if qos_policy_group_name:
        lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
    try:
        self.connection.invoke_successfully(lun_create, True)
    except netapp_api.NaApiError as ex:
        with excutils.save_and_reraise_exception():
            LOG.error("Error provisioning volume %(lun_name)s on "
                      "%(volume_name)s. Details: %(ex)s",
                      {'lun_name': lun_name,
                       'volume_name': volume_name,
                       'ex': ex})
    if pre_9_5:
        # Shrink the max-size LUN down to the size actually requested,
        # then restore the reservation we disabled above if asked for.
        self.do_direct_resize(path, six.text_type(size))
        if metadata['SpaceReserved'] == 'true':
            self.set_lun_space_reservation(path, True)
def set_lun_space_reservation(self, path, flag):
    """Sets the LUN space reservation on ONTAP.

    :param path: full LUN path, e.g. /vol/<flexvol>/<lun>
    :param flag: truth value; serialized with str() as in the original
    """
    api_args = {'path': path, 'enable': str(flag)}
    request = netapp_api.NaElement.create_node_with_children(
        'lun-set-space-reservation-info', **api_args)
    self.connection.invoke_successfully(request, True)
def destroy_lun(self, path, force=True):
    """Destroys the LUN at the path."""
    lun_destroy = netapp_api.NaElement.create_node_with_children(
        'lun-destroy', **{'path': path})
    if force:
        lun_destroy.add_new_child('force', 'true')
    self.connection.invoke_successfully(lun_destroy, True)
    # Log only the LUN name, i.e. the final path component.
    LOG.debug("Destroyed LUN %s", path.split("/")[-1])
def map_lun(self, path, igroup_name, lun_id=None):
    """Maps LUN to the initiator and returns LUN id assigned.

    :param path: full LUN path, e.g. /vol/<flexvol>/<lun>
    :param igroup_name: initiator group to map the LUN to
    :param lun_id: explicit LUN id to request; None lets ONTAP choose
    """
    lun_map = netapp_api.NaElement.create_node_with_children(
        'lun-map', **{'path': path,
                      'initiator-group': igroup_name})
    # BUG FIX: 'if lun_id:' silently discarded an explicit id of 0,
    # which is a valid LUN id; only None means "let ONTAP pick".
    if lun_id is not None:
        lun_map.add_new_child('lun-id', lun_id)
    try:
        result = self.connection.invoke_successfully(lun_map, True)
        return result.get_child_content('lun-id-assigned')
    except netapp_api.NaApiError as e:
        LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
                    '%(message)s', {'code': e.code, 'message': e.message})
        raise
def unmap_lun(self, path, igroup_name):
    """Unmaps a LUN from given initiator.

    API errors indicating the LUN is already unmapped are swallowed,
    since that is the desired end state; anything else is re-raised.
    """
    lun_unmap = netapp_api.NaElement.create_node_with_children(
        'lun-unmap',
        **{'path': path, 'initiator-group': igroup_name})
    try:
        self.connection.invoke_successfully(lun_unmap, True)
    except netapp_api.NaApiError as e:
        LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
                    "%(message)s", {'code': e.code,
                                    'message': e.message})
        # if the LUN is already unmapped
        if e.code not in ('13115', '9016'):
            # A bare re-raise preserves the original traceback; the
            # sys.exc_info()/six.reraise() dance is unnecessary on
            # Python 3.
            raise
def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
    """Creates igroup with specified args."""
    api_args = {
        'initiator-group-name': igroup,
        'initiator-group-type': igroup_type,
        'os-type': os_type,
    }
    igroup_create = netapp_api.NaElement.create_node_with_children(
        'igroup-create', **api_args)
    self.connection.invoke_successfully(igroup_create, True)
def add_igroup_initiator(self, igroup, initiator):
    """Adds initiators to the specified igroup."""
    api_args = {
        'initiator-group-name': igroup,
        'initiator': initiator,
    }
    igroup_add = netapp_api.NaElement.create_node_with_children(
        'igroup-add', **api_args)
    self.connection.invoke_successfully(igroup_add, True)
def do_direct_resize(self, path, new_size_bytes, force=True):
    """Resize the LUN.

    :param new_size_bytes: new size as a string, in bytes
    :param force: when True, sets the ZAPI 'force' flag on the resize
    """
    LOG.info("Resizing LUN %s directly to new size.", path.split("/")[-1])
    api_args = {'path': path, 'size': new_size_bytes}
    lun_resize = netapp_api.NaElement.create_node_with_children(
        'lun-resize', **api_args)
    if force:
        lun_resize.add_new_child('force', 'true')
    self.connection.invoke_successfully(lun_resize, True)
def get_lun_geometry(self, path):
    """Gets the LUN geometry.

    Returns a (possibly empty) dict; failures are logged, not raised,
    preserving the original best-effort behavior.
    """
    geometry = {}
    lun_geo = netapp_api.NaElement("lun-get-geometry")
    lun_geo.add_new_child('path', path)
    # (result key, ZAPI element name) pairs, in the original order.
    fields = (('size', 'size'),
              ('bytes_per_sector', 'bytes-per-sector'),
              ('sectors_per_track', 'sectors-per-track'),
              ('tracks_per_cylinder', 'tracks-per-cylinder'),
              ('cylinders', 'cylinders'),
              ('max_resize', 'max-resize-size'))
    try:
        result = self.connection.invoke_successfully(lun_geo, True)
        for key, element in fields:
            geometry[key] = result.get_child_content(element)
    except Exception as e:
        LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
                  {'path': path, 'msg': six.text_type(e)})
    return geometry
def get_volume_options(self, volume_name):
    """Get the value for the volume option."""
    vol_option_list = netapp_api.NaElement("volume-options-list-info")
    vol_option_list.add_new_child('volume', volume_name)
    result = self.connection.invoke_successfully(vol_option_list, True)
    options = result.get_child_by_name("options")
    # A missing/empty <options> element yields an empty list.
    return options.get_children() if options else []
def move_lun(self, path, new_path):
    """Moves the LUN at path to new path."""
    LOG.debug("Moving LUN %(name)s to %(new_name)s.",
              {'name': path.split("/")[-1],
               'new_name': new_path.split("/")[-1]})
    lun_move = netapp_api.NaElement("lun-move")
    lun_move.add_new_child("path", path)
    lun_move.add_new_child("new-path", new_path)
    self.connection.invoke_successfully(lun_move, True)
# The following operations are interface hooks only; presumably each is
# overridden by a protocol-/version-specific subclass (the overrides are
# not visible in this module).
def get_iscsi_target_details(self):
    """Gets the iSCSI target portal details."""
    raise NotImplementedError()
def get_fc_target_wwpns(self):
    """Gets the FC target details."""
    raise NotImplementedError()
def get_iscsi_service_details(self):
    """Returns iscsi iqn."""
    raise NotImplementedError()
def check_iscsi_initiator_exists(self, iqn):
    """Returns True if initiator exists."""
    raise NotImplementedError()
def set_iscsi_chap_authentication(self, iqn, username, password):
    """Provides NetApp host's CHAP credentials to the backend."""
    raise NotImplementedError()
def get_lun_list(self):
    """Gets the list of LUNs on filer."""
    raise NotImplementedError()
def get_igroup_by_initiators(self, initiator_list):
    """Get igroups exactly matching a set of initiators."""
    raise NotImplementedError()
def _has_luns_mapped_to_initiator(self, initiator):
    """Checks whether any LUNs are mapped to the given initiator."""
    lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
    lun_list_api.add_new_child('initiator', initiator)
    result = self.connection.invoke_successfully(lun_list_api, True)
    # Fall back to an empty element when <lun-maps> is absent.
    container = (result.get_child_by_name('lun-maps')
                 or netapp_api.NaElement('none'))
    return bool(container.get_children())
def has_luns_mapped_to_initiators(self, initiator_list):
    """Checks whether any LUNs are mapped to the given initiator(s).

    Short-circuits on the first initiator that has at least one mapping.
    """
    return any(self._has_luns_mapped_to_initiator(initiator)
               for initiator in initiator_list)
def get_lun_by_args(self, **args):
    """Retrieves LUNs with specified args.

    Interface hook only; presumably overridden by a version-specific
    subclass (the override is not visible in this module).
    """
    raise NotImplementedError()
def get_performance_counter_info(self, object_name, counter_name):
    """Gets info about one or more Data ONTAP performance counters.

    :returns: dict with 'name', 'labels' and 'base-counter' keys
    :raises exception.NotFound: if the named counter does not exist
    """
    api_args = {'objectname': object_name}
    result = self.connection.send_request('perf-object-counter-list-info',
                                          api_args,
                                          enable_tunneling=False)
    counters = result.get_child_by_name(
        'counters') or netapp_api.NaElement('None')
    for counter in counters.get_children():
        if counter.get_child_content('name') != counter_name:
            continue
        # Each <labels> child holds a comma-separated list; flatten them.
        label_list = counter.get_child_by_name(
            'labels') or netapp_api.NaElement('None')
        labels = []
        for label in label_list.get_children():
            labels.extend(label.get_content().split(','))
        return {
            'name': counter_name,
            'labels': labels,
            'base-counter': counter.get_child_content('base-counter'),
        }
    raise exception.NotFound(_('Counter %s not found') % counter_name)
def delete_snapshot(self, volume_name, snapshot_name):
    """Deletes a volume snapshot."""
    self.connection.send_request(
        'snapshot-delete',
        {'volume': volume_name, 'snapshot': snapshot_name})
def create_cg_snapshot(self, volume_names, snapshot_name):
    """Creates a consistency group snapshot out of one or more flexvols.

    ONTAP requires an invocation of cg-start to first fence off the
    flexvols to be included in the snapshot. If cg-start succeeds, a
    cg-commit must then be executed to finalize the snapshot and
    unfence the flexvols.
    """
    cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
    if cg_id:
        self._commit_cg_snapshot(cg_id)
        return
    msg = _('Could not start consistency group snapshot %s.')
    raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
def _start_cg_snapshot(self, volume_names, snapshot_name):
    """Fence the flexvols and open a CG snapshot; return its cg-id."""
    volumes = [{'volume-name': name} for name in volume_names]
    api_args = {
        'snapshot': snapshot_name,
        'timeout': 'relaxed',
        'volumes': volumes,
    }
    result = self.connection.send_request('cg-start', api_args)
    return result.get_child_content('cg-id')
def _commit_cg_snapshot(self, cg_id):
    # Finalize the snapshot opened by cg-start and unfence the flexvols.
    snapshot_commit = {'cg-id': cg_id}
    self.connection.send_request('cg-commit', snapshot_commit)
def get_snapshot(self, volume_name, snapshot_name):
    """Gets a single snapshot.

    Interface hook only; presumably overridden by a version-specific
    subclass. Callers (wait_for_busy_snapshot) expect the returned
    mapping to carry at least a 'busy' key.
    """
    raise NotImplementedError()
@utils.retry(exception.SnapshotIsBusy)
def wait_for_busy_snapshot(self, flexvol, snapshot_name):
    """Checks for and handles a busy snapshot.

    If a snapshot is busy, for reasons other than cloning, an exception
    is raised immediately. Otherwise, wait for a period of time for the
    clone dependency to finish before giving up. If the snapshot is not
    busy then no action is taken and the method exits.
    """
    snapshot = self.get_snapshot(flexvol, snapshot_name)
    if snapshot['busy']:
        # Raising SnapshotIsBusy trips the @utils.retry decorator above,
        # which re-invokes this method until the clone dependency clears
        # or the retries are exhausted.
        LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
                  "for volume clone dependency to clear.",
                  {"snap": snapshot_name, "vol": flexvol})
        raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)
    LOG.debug("Backing consistency group snapshot %s available for "
              "deletion.", snapshot_name)
def mark_snapshot_for_deletion(self, volume, snapshot_name):
    """Mark snapshot for deletion by renaming snapshot."""
    deleted_name = DELETED_PREFIX + snapshot_name
    return self.rename_snapshot(volume, snapshot_name, deleted_name)
def rename_snapshot(self, volume, current_name, new_name):
    """Renames a snapshot."""
    return self.connection.send_request(
        'snapshot-rename',
        {'volume': volume,
         'current-name': current_name,
         'new-name': new_name})
|
get_machine_learning_compute
|
Use this data source to access information about an existing resource.
:param str compute_name: Name of the Azure Machine Learning compute.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetMachineLearningComputeResult',
'AwaitableGetMachineLearningComputeResult',
'get_machine_learning_compute',
]
@pulumi.output_type
class GetMachineLearningComputeResult:
    """
    Machine Learning compute object wrapped into ARM resource envelope.
    """
    # Generated result type: the constructor validates each argument's
    # runtime type and stores it via pulumi.set(); the @property getters
    # below read the stored values back through pulumi.get().
    def __init__(__self__, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Compute properties
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetMachineLearningComputeResult(GetMachineLearningComputeResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable so callers may use
        # ``await``. The unreachable ``yield`` turns this method into a
        # generator; awaiting it immediately returns a plain result copy.
        if False:
            yield self
        return GetMachineLearningComputeResult(
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
# MASKED: get_machine_learning_compute function (lines 118-146)
|
def get_machine_learning_compute(compute_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 workspace_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineLearningComputeResult:
    """
    Use this data source to access information about an existing resource.
    :param str compute_name: Name of the Azure Machine Learning compute.
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    invoke_args = {
        'computeName': compute_name,
        'resourceGroupName': resource_group_name,
        'workspaceName': workspace_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap its value.
    result = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200901preview:getMachineLearningCompute', invoke_args, opts=opts, typ=GetMachineLearningComputeResult).value
    return AwaitableGetMachineLearningComputeResult(
        identity=result.identity,
        location=result.location,
        name=result.name,
        properties=result.properties,
        sku=result.sku,
        tags=result.tags,
        type=result.type)
| 118 | 146 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetMachineLearningComputeResult',
'AwaitableGetMachineLearningComputeResult',
'get_machine_learning_compute',
]
@pulumi.output_type
class GetMachineLearningComputeResult:
    """
    Machine Learning compute object wrapped into ARM resource envelope.
    """
    # Generated result type: the constructor validates each argument's
    # runtime type and stores it via pulumi.set(); the @property getters
    # below read the stored values back through pulumi.get().
    def __init__(__self__, identity=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
        if identity and not isinstance(identity, dict):
            raise TypeError("Expected argument 'identity' to be a dict")
        pulumi.set(__self__, "identity", identity)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if properties and not isinstance(properties, dict):
            raise TypeError("Expected argument 'properties' to be a dict")
        pulumi.set(__self__, "properties", properties)
        if sku and not isinstance(sku, dict):
            raise TypeError("Expected argument 'sku' to be a dict")
        pulumi.set(__self__, "sku", sku)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter
    def identity(self) -> Optional['outputs.IdentityResponse']:
        """
        The identity of the resource.
        """
        return pulumi.get(self, "identity")
    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Specifies the location of the resource.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def properties(self) -> Any:
        """
        Compute properties
        """
        return pulumi.get(self, "properties")
    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the workspace.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Contains resource tags defined as key/value pairs.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetMachineLearningComputeResult(GetMachineLearningComputeResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Make the already-resolved result awaitable so callers may use
        # ``await``. The unreachable ``yield`` turns this method into a
        # generator; awaiting it immediately returns a plain result copy.
        if False:
            yield self
        return GetMachineLearningComputeResult(
            identity=self.identity,
            location=self.location,
            name=self.name,
            properties=self.properties,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_machine_learning_compute(compute_name: Optional[str] = None,
                                 resource_group_name: Optional[str] = None,
                                 workspace_name: Optional[str] = None,
                                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMachineLearningComputeResult:
    """
    Use this data source to access information about an existing resource.
    :param str compute_name: Name of the Azure Machine Learning compute.
    :param str resource_group_name: Name of the resource group in which workspace is located.
    :param str workspace_name: Name of Azure Machine Learning workspace.
    """
    invoke_args = {
        'computeName': compute_name,
        'resourceGroupName': resource_group_name,
        'workspaceName': workspace_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronously invoke the provider function and unwrap its value.
    result = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/v20200901preview:getMachineLearningCompute', invoke_args, opts=opts, typ=GetMachineLearningComputeResult).value
    return AwaitableGetMachineLearningComputeResult(
        identity=result.identity,
        location=result.location,
        name=result.name,
        properties=result.properties,
        sku=result.sku,
        tags=result.tags,
        type=result.type)
|
test_read_topojson
|
Test reading a TopoJSON file
The TopoJSON support in GDAL is a little unpredictable. In some versions
the geometries or properties aren't parsed correctly. Here we just check
that we can open the file, get the right number of features out, and
that they have a geometry and some properties. See GH#722.
|
"""
Support for TopoJSON was added in OGR 1.11 to the `GeoJSON` driver.
Starting at GDAL 2.3 support was moved to the `TopoJSON` driver.
"""
import fiona
from fiona.env import GDALVersion
import os
import pytest
from collections import OrderedDict
gdal_version = GDALVersion.runtime()
driver = "TopoJSON" if gdal_version.at_least((2, 3)) else "GeoJSON"
has_driver = driver in fiona.drvsupport.supported_drivers.keys()
# MASKED: test_read_topojson function (lines 18-35)
|
@pytest.mark.skipif(not gdal_version.at_least((1, 11)), reason="Requires GDAL >= 1.11")
@pytest.mark.skipif(not has_driver, reason="Requires {} driver".format(driver))
def test_read_topojson(data_dir):
    """Test reading a TopoJSON file
    The TopoJSON support in GDAL is a little unpredictable. In some versions
    the geometries or properties aren't parsed correctly. Here we just check
    that we can open the file, get the right number of features out, and
    that they have a geometry and some properties. See GH#722.
    """
    path = os.path.join(data_dir, "example.topojson")
    with fiona.open(path, "r") as collection:
        features = [feat for feat in collection]
    assert len(features) == 3, "unexpected number of features"
    geometry_types = {"Point", "LineString", "Polygon"}
    for feat in features:
        props = feat["properties"]
        assert isinstance(props, OrderedDict)
        assert len(props) > 0
        assert feat["geometry"]["type"] in geometry_types
| 18 | 35 |
"""
Support for TopoJSON was added in OGR 1.11 to the `GeoJSON` driver.
Starting at GDAL 2.3 support was moved to the `TopoJSON` driver.
"""
import fiona
from fiona.env import GDALVersion
import os
import pytest
from collections import OrderedDict
gdal_version = GDALVersion.runtime()
driver = "TopoJSON" if gdal_version.at_least((2, 3)) else "GeoJSON"
has_driver = driver in fiona.drvsupport.supported_drivers.keys()
@pytest.mark.skipif(not gdal_version.at_least((1, 11)), reason="Requires GDAL >= 1.11")
@pytest.mark.skipif(not has_driver, reason="Requires {} driver".format(driver))
def test_read_topojson(data_dir):
    """Test reading a TopoJSON file
    The TopoJSON support in GDAL is a little unpredictable. In some versions
    the geometries or properties aren't parsed correctly. Here we just check
    that we can open the file, get the right number of features out, and
    that they have a geometry and some properties. See GH#722.
    """
    path = os.path.join(data_dir, "example.topojson")
    with fiona.open(path, "r") as collection:
        features = [feat for feat in collection]
    assert len(features) == 3, "unexpected number of features"
    geometry_types = {"Point", "LineString", "Polygon"}
    for feat in features:
        props = feat["properties"]
        assert isinstance(props, OrderedDict)
        assert len(props) > 0
        assert feat["geometry"]["type"] in geometry_types
|
labeled_ips
|
Returns the list of all IPs
The return value looks like this flat structure::
{'network_label': 'my_network',
'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
'ips': [{'address': '123.123.123.123',
'version': 4,
'type': 'fixed',
'meta': {...}},
{'address': '124.124.124.124',
'version': 4,
'type': 'floating',
'meta': {...}},
{'address': 'fe80::4',
'version': 6,
'type': 'fixed',
'meta': {...}}]
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from oslo_serialization import jsonutils
import six
from nova import exception
from nova.i18n import _
from nova import utils
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HW_VEB = 'hw_veb'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_HOSTDEV = 'hostdev_physical'
VIF_TYPE_IB_HOSTDEV = 'ib_hostdev'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_VHOSTUSER = 'vhostuser'
VIF_TYPE_VROUTER = 'vrouter'
VIF_TYPE_OTHER = 'other'
VIF_TYPE_TAP = 'tap'
VIF_TYPE_MACVTAP = 'macvtap'
VIF_TYPE_AGILIO_OVS = 'agilio_ovs'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_VIF = 'vif'
# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAILS_PORT_FILTER = 'port_filter'
VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
VIF_DETAILS_BRIDGE_NAME = 'bridge_name'
VIF_DETAILS_OVS_DATAPATH_TYPE = 'datapath_type'
# The following constant defines an SR-IOV related parameter in the
# 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH
VIF_DETAILS_PROFILEID = 'profileid'
# The following constant defines an SR-IOV and macvtap related parameter in
# the 'vif_details'. 'vlan' should be used for VIF_TYPE_HW_VEB or
# VIF_TYPE_MACVTAP
VIF_DETAILS_VLAN = 'vlan'
# The following three constants define the macvtap related fields in
# the 'vif_details'.
VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source'
VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode'
VIF_DETAILS_PHYS_INTERFACE = 'physical_interface'
# Constants for vhost-user related fields in 'vif_details'.
# Sets mode on vhost-user socket, valid values are 'client'
# and 'server'
VIF_DETAILS_VHOSTUSER_MODE = 'vhostuser_mode'
# vhost-user socket path
VIF_DETAILS_VHOSTUSER_SOCKET = 'vhostuser_socket'
# Specifies whether vhost-user socket should be plugged
# into ovs bridge. Valid values are True and False
VIF_DETAILS_VHOSTUSER_OVS_PLUG = 'vhostuser_ovs_plug'
# Specifies whether vhost-user socket should be used to
# create a fp netdevice interface.
VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug'
# Specifies whether vhost-user socket should be used to
# create a vrouter netdevice interface
# TODO(mhenkel): Consider renaming this to be contrail-specific.
VIF_DETAILS_VHOSTUSER_VROUTER_PLUG = 'vhostuser_vrouter_plug'
# Constants for dictionary keys in the 'vif_details' field that are
# valid for VIF_TYPE_TAP.
VIF_DETAILS_TAP_MAC_ADDRESS = 'mac_address'
# Open vSwitch datapath types.
VIF_DETAILS_OVS_DATAPATH_SYSTEM = 'system'
VIF_DETAILS_OVS_DATAPATH_NETDEV = 'netdev'
# Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP
# are used for SR-IOV ports
VNIC_TYPE_NORMAL = 'normal'
VNIC_TYPE_DIRECT = 'direct'
VNIC_TYPE_MACVTAP = 'macvtap'
VNIC_TYPE_DIRECT_PHYSICAL = 'direct-physical'
VNIC_TYPE_BAREMETAL = 'baremetal'
VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder'
# Define list of ports which needs pci request.
# Note: The macvtap port needs a PCI request as it is a tap interface
# with VF as the lower physical interface.
# Note: Currently, VNIC_TYPE_VIRTIO_FORWARDER assumes a 1:1
# relationship with a VF. This is expected to change in the future.
VNIC_TYPES_SRIOV = (VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP,
VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_VIRTIO_FORWARDER)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
VNIC_TYPE_DIRECT_PHYSICAL)
# Constants for the 'vif_model' values
VIF_MODEL_VIRTIO = 'virtio'
VIF_MODEL_NE2K_PCI = 'ne2k_pci'
VIF_MODEL_PCNET = 'pcnet'
VIF_MODEL_RTL8139 = 'rtl8139'
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'
VIF_MODEL_LAN9118 = 'lan9118'
VIF_MODEL_SRIOV = 'sriov'
VIF_MODEL_VMXNET = 'vmxnet'
VIF_MODEL_VMXNET3 = 'vmxnet3'
VIF_MODEL_ALL = (
VIF_MODEL_VIRTIO,
VIF_MODEL_NE2K_PCI,
VIF_MODEL_PCNET,
VIF_MODEL_RTL8139,
VIF_MODEL_E1000,
VIF_MODEL_E1000E,
VIF_MODEL_NETFRONT,
VIF_MODEL_SPAPR_VLAN,
VIF_MODEL_LAN9118,
VIF_MODEL_SRIOV,
VIF_MODEL_VMXNET,
VIF_MODEL_VMXNET3,
)
# these types have been leaked to guests in network_data.json
LEGACY_EXPOSED_VIF_TYPES = (
VIF_TYPE_BRIDGE,
VIF_TYPE_DVS,
VIF_TYPE_HW_VEB,
VIF_TYPE_HYPERV,
VIF_TYPE_OVS,
VIF_TYPE_TAP,
VIF_TYPE_VHOSTUSER,
VIF_TYPE_VIF,
)
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
    """Common dict-backed base class for the network model types.

    Subclasses store their fields as dictionary items; any keyword
    arguments a subclass does not consume are collected under 'meta'.
    """
    def __repr__(self):
        # Serialize as JSON so reprs are stable and machine-parseable.
        return jsonutils.dumps(self)
    def _set_meta(self, kwargs):
        """Move 'meta' (plus any leftover kwargs) into self['meta']."""
        meta = kwargs.pop('meta', {})
        meta.update(kwargs)
        self['meta'] = meta
    def get_meta(self, key, default=None):
        """Look up *key* in self['meta'], returning *default* if absent."""
        return self['meta'].get(key, default)
class IP(Model):
    """Represents an IP address in Nova."""
    def __init__(self, address=None, type=None, **kwargs):
        super(IP, self).__init__()
        self['address'] = address
        self['type'] = type
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        if self['version'] or not self['address']:
            return
        # Derive the IP version from the address when not supplied.
        try:
            self['version'] = netaddr.IPAddress(self['address']).version
        except netaddr.AddrFormatError:
            msg = _("Invalid IP format %s") % self['address']
            raise exception.InvalidIpAddressError(msg)
    def __eq__(self, other):
        return all(self[field] == other[field]
                   for field in ('address', 'type', 'version'))
    def __ne__(self, other):
        return not self.__eq__(other)
    def is_in_subnet(self, subnet):
        """True if this address falls inside *subnet*'s CIDR."""
        if not (self['address'] and subnet['cidr']):
            return False
        return (netaddr.IPAddress(self['address']) in
                netaddr.IPNetwork(subnet['cidr']))
    @classmethod
    def hydrate(cls, ip):
        """Rebuild an IP model from a raw dict, or None for falsy input."""
        return cls(**ip) if ip else None
class FixedIP(IP):
    """Represents a Fixed IP address in Nova."""
    def __init__(self, floating_ips=None, **kwargs):
        super(FixedIP, self).__init__(**kwargs)
        self['floating_ips'] = floating_ips or []
        # Default the type so callers may omit it.
        if not self['type']:
            self['type'] = 'fixed'
    def add_floating_ip(self, floating_ip):
        """Associate *floating_ip* unless an equal one is already present."""
        if floating_ip in self['floating_ips']:
            return
        self['floating_ips'].append(floating_ip)
    def floating_ip_addresses(self):
        """Return the addresses of all associated floating IPs."""
        return [fip['address'] for fip in self['floating_ips']]
    @staticmethod
    def hydrate(fixed_ip):
        """Build a FixedIP (with hydrated floating IPs) from a raw dict."""
        result = FixedIP(**fixed_ip)
        result['floating_ips'] = [IP.hydrate(fip)
                                  for fip in result['floating_ips']]
        return result
    def __eq__(self, other):
        return all(self[field] == other[field]
                   for field in ('address', 'type', 'version',
                                 'floating_ips'))
    def __ne__(self, other):
        return not self.__eq__(other)
class Route(Model):
    """Represents an IP Route in Nova."""
    def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
        super(Route, self).__init__()
        # FIXME(mriedem): Is 'interface' actually used? It's never set.
        self.update(cidr=cidr, gateway=gateway, interface=interface)
        self._set_meta(kwargs)
    @classmethod
    def hydrate(cls, route):
        """Build a Route (with a hydrated gateway IP) from a raw dict."""
        hydrated = cls(**route)
        hydrated['gateway'] = IP.hydrate(hydrated['gateway'])
        return hydrated
class Subnet(Model):
    """Represents a Subnet in Nova."""
    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        super(Subnet, self).__init__()
        self['cidr'] = cidr
        self['dns'] = dns or []
        self['gateway'] = gateway
        self['ips'] = ips or []
        self['routes'] = routes or []
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the CIDR when not supplied.
        if self['cidr'] and not self['version']:
            self['version'] = netaddr.IPNetwork(self['cidr']).version
    def __eq__(self, other):
        return all(self[field] == other[field]
                   for field in ('cidr', 'dns', 'gateway', 'ips',
                                 'routes', 'version'))
    def __ne__(self, other):
        return not self.__eq__(other)
    def add_route(self, new_route):
        """Append *new_route* unless an equal route is already present."""
        if new_route in self['routes']:
            return
        self['routes'].append(new_route)
    def add_dns(self, dns):
        """Append *dns* unless an equal server is already present."""
        if dns in self['dns']:
            return
        self['dns'].append(dns)
    def add_ip(self, ip):
        """Append *ip* unless an equal address is already present."""
        if ip in self['ips']:
            return
        self['ips'].append(ip)
    def as_netaddr(self):
        """Convenient function to get cidr as a netaddr object."""
        return netaddr.IPNetwork(self['cidr'])
    @classmethod
    def hydrate(cls, subnet):
        """Build a Subnet, hydrating nested dns/ips/routes/gateway models."""
        hydrated = cls(**subnet)
        hydrated['dns'] = [IP.hydrate(dns) for dns in hydrated['dns']]
        hydrated['ips'] = [FixedIP.hydrate(ip) for ip in hydrated['ips']]
        hydrated['routes'] = [Route.hydrate(route)
                              for route in hydrated['routes']]
        hydrated['gateway'] = IP.hydrate(hydrated['gateway'])
        return hydrated
class Network(Model):
    """Represents a Network in Nova."""
    def __init__(self, id=None, bridge=None, label=None,
                 subnets=None, **kwargs):
        super(Network, self).__init__()
        self['id'] = id
        self['bridge'] = bridge
        self['label'] = label
        self['subnets'] = subnets or []
        self._set_meta(kwargs)
    def add_subnet(self, subnet):
        """Append *subnet* unless an equal subnet is already present."""
        if subnet in self['subnets']:
            return
        self['subnets'].append(subnet)
    @classmethod
    def hydrate(cls, network):
        """Build a Network (with hydrated subnets); pass falsy input through."""
        if not network:
            return network
        hydrated = cls(**network)
        hydrated['subnets'] = [Subnet.hydrate(subnet)
                               for subnet in hydrated['subnets']]
        return hydrated
    def __eq__(self, other):
        return all(self[field] == other[field]
                   for field in ('id', 'bridge', 'label', 'subnets'))
    def __ne__(self, other):
        return not self.__eq__(other)
class VIF8021QbgParams(Model):
    """Represents the parameters for a 802.1qbg VIF."""
    def __init__(self, managerid, typeid, typeidversion, instanceid):
        super(VIF8021QbgParams, self).__init__()
        self.update(managerid=managerid,
                    typeid=typeid,
                    typeidversion=typeidversion,
                    instanceid=instanceid)
class VIF8021QbhParams(Model):
    """Represents the parameters for a 802.1qbh VIF."""
    def __init__(self, profileid):
        super(VIF8021QbhParams, self).__init__()
        self.update(profileid=profileid)
class VIF(Model):
    """Represents a Virtual Interface in Nova."""
    def __init__(self, id=None, address=None, network=None, type=None,
                 details=None, devname=None, ovs_interfaceid=None,
                 qbh_params=None, qbg_params=None, active=False,
                 vnic_type=VNIC_TYPE_NORMAL, profile=None,
                 preserve_on_delete=False, **kwargs):
        super(VIF, self).__init__()
        self['id'] = id
        # MAC address of the interface.
        self['address'] = address
        # Normalize falsy networks (e.g. {}) to None.
        self['network'] = network or None
        self['type'] = type
        self['details'] = details or {}
        self['devname'] = devname
        self['ovs_interfaceid'] = ovs_interfaceid
        self['qbh_params'] = qbh_params
        self['qbg_params'] = qbg_params
        self['active'] = active
        self['vnic_type'] = vnic_type
        self['profile'] = profile
        self['preserve_on_delete'] = preserve_on_delete
        # Any remaining kwargs land in self['meta'].
        self._set_meta(kwargs)
    def __eq__(self, other):
        # Field-by-field comparison; 'meta' is deliberately excluded.
        keys = ['id', 'address', 'network', 'vnic_type',
                'type', 'profile', 'details', 'devname',
                'ovs_interfaceid', 'qbh_params', 'qbg_params',
                'active', 'preserve_on_delete']
        return all(self[k] == other[k] for k in keys)
    def __ne__(self, other):
        return not self.__eq__(other)
    def fixed_ips(self):
        # Flatten all fixed IPs across every subnet of the attached network;
        # empty list when no network is attached.
        if self['network']:
            return [fixed_ip for subnet in self['network']['subnets']
                    for fixed_ip in subnet['ips']]
        else:
            return []
    def floating_ips(self):
        # Flatten the floating IPs associated with each fixed IP.
        return [floating_ip for fixed_ip in self.fixed_ips()
                for floating_ip in fixed_ip['floating_ips']]
    # MASKED: labeled_ips function (lines 425-457)
    def is_hybrid_plug_enabled(self):
        # Defaults to False when the detail is absent.
        return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False)
    def is_neutron_filtering_enabled(self):
        # Defaults to False when the detail is absent.
        return self['details'].get(VIF_DETAILS_PORT_FILTER, False)
    def get_physical_network(self):
        # Prefer the network's meta; fall back to the binding details.
        phy_network = self['network']['meta'].get('physical_network')
        if not phy_network:
            phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
        return phy_network
    @classmethod
    def hydrate(cls, vif):
        """Rebuild a VIF model (and its nested Network) from a raw dict."""
        vif = cls(**vif)
        vif['network'] = Network.hydrate(vif['network'])
        return vif
def get_netmask(ip, subnet):
    """Return the netmask appropriate for injection into a guest.

    :param ip: IP model (dict-like); its 'version' item selects the format.
    :param subnet: Subnet model providing ``as_netaddr()``.
    :return: dotted-quad netmask string for IPv4 (e.g. '255.255.255.0'),
             or the integer prefix length for IPv6 (e.g. 64).
    """
    # Build the netaddr network once instead of per-branch.
    network = subnet.as_netaddr()
    if ip['version'] == 4:
        return str(network.netmask)
    # Use the public ``prefixlen`` property instead of the private
    # ``_prefixlen`` attribute; the value is identical.
    return network.prefixlen
class NetworkInfo(list):
    """Stores and manipulates network information for a Nova instance."""
    # A NetworkInfo is simply a list of VIF models with helpers on top.
    def fixed_ips(self):
        """Returns all fixed_ips without floating_ips attached."""
        ips = []
        for vif in self:
            ips.extend(vif.fixed_ips())
        return ips
    def floating_ips(self):
        """Returns all floating_ips."""
        ips = []
        for vif in self:
            ips.extend(vif.floating_ips())
        return ips
    @classmethod
    def hydrate(cls, network_info):
        """Rebuild a NetworkInfo from a JSON string or list of raw dicts."""
        if isinstance(network_info, six.string_types):
            network_info = jsonutils.loads(network_info)
        return cls(VIF.hydrate(vif) for vif in network_info)
    def wait(self, do_raise=True):
        """Wait for asynchronous call to finish."""
        # There is no asynchronous call for this class, so this is a no-op
        # here, but subclasses may override to provide asynchronous
        # capabilities. Must be defined here in the parent class so that code
        # which works with both parent and subclass types can reference this
        # method.
        pass
    def json(self):
        """Serialize the whole model to a JSON string."""
        return jsonutils.dumps(self)
class NetworkInfoAsyncWrapper(NetworkInfo):
    """Wrapper around NetworkInfo that allows retrieving NetworkInfo
    in an async manner.
    This allows one to start querying for network information before
    you know you will need it. If you have a long-running
    operation, this allows the network model retrieval to occur in the
    background. When you need the data, it will ensure the async
    operation has completed.
    As an example:
    def allocate_net_info(arg1, arg2)
    return call_neutron_to_allocate(arg1, arg2)
    network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
    [do a long running operation -- real network_info will be retrieved
    in the background]
    [do something with network_info]
    """
    def __init__(self, async_method, *args, **kwargs):
        super(NetworkInfoAsyncWrapper, self).__init__()
        # Kick off the retrieval in the background immediately.
        self._gt = utils.spawn(async_method, *args, **kwargs)
        # Rebind the data-accessing public methods so each one first waits
        # for the background call to finish (via _sync_wrapper).
        methods = ['json', 'fixed_ips', 'floating_ips']
        for method in methods:
            fn = getattr(self, method)
            wrapper = functools.partial(self._sync_wrapper, fn)
            functools.update_wrapper(wrapper, fn)
            setattr(self, method, wrapper)
    def _sync_wrapper(self, wrapped, *args, **kwargs):
        """Synchronize the model before running a method."""
        self.wait()
        return wrapped(*args, **kwargs)
    # The dunder methods below cannot be rebound per-instance (Python looks
    # them up on the type), so each override synchronizes explicitly.
    def __getitem__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__getitem__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __iter__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__iter__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __len__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__len__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __str__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__str__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __repr__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__repr__
        return self._sync_wrapper(fn, *args, **kwargs)
    def wait(self, do_raise=True):
        """Wait for asynchronous call to finish.

        :param do_raise: when False, swallow any exception raised by the
                         background call instead of re-raising it.
        """
        if self._gt is not None:
            try:
                # NOTE(comstud): This looks funky, but this object is
                # subclassed from list. In other words, 'self' is really
                # just a list with a bunch of extra methods. So this
                # line just replaces the current list (which should be
                # empty) with the result.
                self[:] = self._gt.wait()
            except Exception:
                if do_raise:
                    raise
            finally:
                # Ensure subsequent waits are no-ops.
                self._gt = None
|
    def labeled_ips(self):
        """Return all of this VIF's IPs, labeled with their network.

        The return value looks like this flat structure::

            {'network_label': 'my_network',
             'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
             'ips': [{'address': '123.123.123.123',
                      'version': 4,
                      'type': 'fixed',
                      'meta': {...}},
                     {'address': '124.124.124.124',
                      'version': 4,
                      'type': 'floating',
                      'meta': {...}},
                     {'address': 'fe80::4',
                      'version': 6,
                      'type': 'fixed',
                      'meta': {...}}]}

        Returns an empty list when no network is attached.
        """
        if self['network']:
            # remove unnecessary fields on fixed_ips
            ips = [IP(**ip) for ip in self.fixed_ips()]
            for ip in ips:
                # remove floating ips from IP, since this is a flat structure
                # of all IPs (IP.__init__ swept the FixedIP's 'floating_ips'
                # kwarg into meta)
                del ip['meta']['floating_ips']
            # add floating ips to list (if any)
            ips.extend(self.floating_ips())
            return {'network_label': self['network']['label'],
                    'network_id': self['network']['id'],
                    'ips': ips}
        return []
| 425 | 457 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import netaddr
from oslo_serialization import jsonutils
import six
from nova import exception
from nova.i18n import _
from nova import utils
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HW_VEB = 'hw_veb'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_HOSTDEV = 'hostdev_physical'
VIF_TYPE_IB_HOSTDEV = 'ib_hostdev'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_VHOSTUSER = 'vhostuser'
VIF_TYPE_VROUTER = 'vrouter'
VIF_TYPE_OTHER = 'other'
VIF_TYPE_TAP = 'tap'
VIF_TYPE_MACVTAP = 'macvtap'
VIF_TYPE_AGILIO_OVS = 'agilio_ovs'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_VIF = 'vif'
# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAILS_PORT_FILTER = 'port_filter'
VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
VIF_DETAILS_BRIDGE_NAME = 'bridge_name'
VIF_DETAILS_OVS_DATAPATH_TYPE = 'datapath_type'
# The following constant defines an SR-IOV related parameter in the
# 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH
VIF_DETAILS_PROFILEID = 'profileid'
# The following constant defines an SR-IOV and macvtap related parameter in
# the 'vif_details'. 'vlan' should be used for VIF_TYPE_HW_VEB or
# VIF_TYPE_MACVTAP
VIF_DETAILS_VLAN = 'vlan'
# The following three constants define the macvtap related fields in
# the 'vif_details'.
VIF_DETAILS_MACVTAP_SOURCE = 'macvtap_source'
VIF_DETAILS_MACVTAP_MODE = 'macvtap_mode'
VIF_DETAILS_PHYS_INTERFACE = 'physical_interface'
# Constants for vhost-user related fields in 'vif_details'.
# Sets mode on vhost-user socket, valid values are 'client'
# and 'server'
VIF_DETAILS_VHOSTUSER_MODE = 'vhostuser_mode'
# vhost-user socket path
VIF_DETAILS_VHOSTUSER_SOCKET = 'vhostuser_socket'
# Specifies whether vhost-user socket should be plugged
# into ovs bridge. Valid values are True and False
VIF_DETAILS_VHOSTUSER_OVS_PLUG = 'vhostuser_ovs_plug'
# Specifies whether vhost-user socket should be used to
# create a fp netdevice interface.
VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug'
# Specifies whether vhost-user socket should be used to
# create a vrouter netdevice interface
# TODO(mhenkel): Consider renaming this to be contrail-specific.
VIF_DETAILS_VHOSTUSER_VROUTER_PLUG = 'vhostuser_vrouter_plug'
# Constants for dictionary keys in the 'vif_details' field that are
# valid for VIF_TYPE_TAP.
VIF_DETAILS_TAP_MAC_ADDRESS = 'mac_address'
# Open vSwitch datapath types.
VIF_DETAILS_OVS_DATAPATH_SYSTEM = 'system'
VIF_DETAILS_OVS_DATAPATH_NETDEV = 'netdev'
# Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP
# are used for SR-IOV ports
VNIC_TYPE_NORMAL = 'normal'
VNIC_TYPE_DIRECT = 'direct'
VNIC_TYPE_MACVTAP = 'macvtap'
VNIC_TYPE_DIRECT_PHYSICAL = 'direct-physical'
VNIC_TYPE_BAREMETAL = 'baremetal'
VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder'
# Define list of ports which needs pci request.
# Note: The macvtap port needs a PCI request as it is a tap interface
# with VF as the lower physical interface.
# Note: Currently, VNIC_TYPE_VIRTIO_FORWARDER assumes a 1:1
# relationship with a VF. This is expected to change in the future.
VNIC_TYPES_SRIOV = (VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP,
VNIC_TYPE_DIRECT_PHYSICAL, VNIC_TYPE_VIRTIO_FORWARDER)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
VNIC_TYPE_DIRECT_PHYSICAL)
# Constants for the 'vif_model' values
VIF_MODEL_VIRTIO = 'virtio'
VIF_MODEL_NE2K_PCI = 'ne2k_pci'
VIF_MODEL_PCNET = 'pcnet'
VIF_MODEL_RTL8139 = 'rtl8139'
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'
VIF_MODEL_LAN9118 = 'lan9118'
VIF_MODEL_SRIOV = 'sriov'
VIF_MODEL_VMXNET = 'vmxnet'
VIF_MODEL_VMXNET3 = 'vmxnet3'
VIF_MODEL_ALL = (
VIF_MODEL_VIRTIO,
VIF_MODEL_NE2K_PCI,
VIF_MODEL_PCNET,
VIF_MODEL_RTL8139,
VIF_MODEL_E1000,
VIF_MODEL_E1000E,
VIF_MODEL_NETFRONT,
VIF_MODEL_SPAPR_VLAN,
VIF_MODEL_LAN9118,
VIF_MODEL_SRIOV,
VIF_MODEL_VMXNET,
VIF_MODEL_VMXNET3,
)
# these types have been leaked to guests in network_data.json
LEGACY_EXPOSED_VIF_TYPES = (
VIF_TYPE_BRIDGE,
VIF_TYPE_DVS,
VIF_TYPE_HW_VEB,
VIF_TYPE_HYPERV,
VIF_TYPE_OVS,
VIF_TYPE_TAP,
VIF_TYPE_VHOSTUSER,
VIF_TYPE_VIF,
)
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
    """Common base for the network model classes.

    A Model is just a dict with helpers for stashing free-form metadata
    under the 'meta' key.
    """

    def __repr__(self):
        # Serialize the whole dict so log output shows the full model.
        return jsonutils.dumps(self)

    def _set_meta(self, kwargs):
        """Move 'meta' (plus any leftover kwargs) into self['meta']."""
        meta = kwargs.pop('meta', {})
        meta.update(kwargs)
        self['meta'] = meta

    def get_meta(self, key, default=None):
        """Shorthand for ``self['meta'].get(key, default)``."""
        meta = self['meta']
        return meta.get(key, default)
class IP(Model):
    """Represents a single IP address in Nova."""

    def __init__(self, address=None, type=None, **kwargs):
        super(IP, self).__init__()
        self['address'] = address
        self['type'] = type
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # No explicit version supplied -- derive it from the address.
        if self['address'] and not self['version']:
            try:
                self['version'] = netaddr.IPAddress(self['address']).version
            except netaddr.AddrFormatError:
                raise exception.InvalidIpAddressError(
                    _("Invalid IP format %s") % self['address'])

    def __eq__(self, other):
        return all(self[key] == other[key]
                   for key in ('address', 'type', 'version'))

    def __ne__(self, other):
        return not self == other

    def is_in_subnet(self, subnet):
        """True when this address falls inside *subnet*'s CIDR."""
        if not (self['address'] and subnet['cidr']):
            return False
        return (netaddr.IPAddress(self['address']) in
                netaddr.IPNetwork(subnet['cidr']))

    @classmethod
    def hydrate(cls, ip):
        """Rebuild an IP from its dict form; falsy input yields None."""
        return cls(**ip) if ip else None
class FixedIP(IP):
    """Represents a fixed IP address in Nova."""

    def __init__(self, floating_ips=None, **kwargs):
        super(FixedIP, self).__init__(**kwargs)
        self['floating_ips'] = floating_ips or []
        # Default the address type unless the caller said otherwise.
        if not self['type']:
            self['type'] = 'fixed'

    def add_floating_ip(self, floating_ip):
        """Associate *floating_ip* with this address, ignoring duplicates."""
        if floating_ip in self['floating_ips']:
            return
        self['floating_ips'].append(floating_ip)

    def floating_ip_addresses(self):
        """Return just the address strings of the attached floating IPs."""
        return [fip['address'] for fip in self['floating_ips']]

    @staticmethod
    def hydrate(fixed_ip):
        """Rebuild a FixedIP (and its nested floating IPs) from dict form."""
        hydrated = FixedIP(**fixed_ip)
        hydrated['floating_ips'] = [
            IP.hydrate(fip) for fip in hydrated['floating_ips']]
        return hydrated

    def __eq__(self, other):
        return all(self[key] == other[key]
                   for key in ('address', 'type', 'version', 'floating_ips'))

    def __ne__(self, other):
        return not self == other
class Route(Model):
    """Represents an IP route in Nova."""

    def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
        super(Route, self).__init__()
        self['cidr'] = cidr
        self['gateway'] = gateway
        # FIXME(mriedem): Is this actually used? It's never set.
        self['interface'] = interface
        self._set_meta(kwargs)

    @classmethod
    def hydrate(cls, route):
        """Rebuild a Route from dict form, re-hydrating its gateway IP."""
        hydrated = cls(**route)
        hydrated['gateway'] = IP.hydrate(hydrated['gateway'])
        return hydrated
class Subnet(Model):
    """Represents a subnet in Nova."""

    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        super(Subnet, self).__init__()
        self['cidr'] = cidr
        self['dns'] = dns or []
        self['gateway'] = gateway
        self['ips'] = ips or []
        self['routes'] = routes or []
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the CIDR when not supplied explicitly.
        if self['cidr'] and not self['version']:
            self['version'] = netaddr.IPNetwork(self['cidr']).version

    def __eq__(self, other):
        return all(self[key] == other[key]
                   for key in ('cidr', 'dns', 'gateway', 'ips',
                               'routes', 'version'))

    def __ne__(self, other):
        return not self == other

    def add_route(self, new_route):
        """Append *new_route* unless it is already present."""
        if new_route in self['routes']:
            return
        self['routes'].append(new_route)

    def add_dns(self, dns):
        """Append DNS server *dns* unless it is already present."""
        if dns in self['dns']:
            return
        self['dns'].append(dns)

    def add_ip(self, ip):
        """Append *ip* unless it is already present."""
        if ip in self['ips']:
            return
        self['ips'].append(ip)

    def as_netaddr(self):
        """Return this subnet's CIDR as a netaddr.IPNetwork."""
        return netaddr.IPNetwork(self['cidr'])

    @classmethod
    def hydrate(cls, subnet):
        """Rebuild a Subnet and all of its nested model objects."""
        hydrated = cls(**subnet)
        hydrated['dns'] = [IP.hydrate(entry) for entry in hydrated['dns']]
        hydrated['ips'] = [FixedIP.hydrate(entry)
                           for entry in hydrated['ips']]
        hydrated['routes'] = [Route.hydrate(entry)
                              for entry in hydrated['routes']]
        hydrated['gateway'] = IP.hydrate(hydrated['gateway'])
        return hydrated
class Network(Model):
    """Represents a Network in Nova."""

    def __init__(self, id=None, bridge=None, label=None,
                 subnets=None, **kwargs):
        super(Network, self).__init__()
        self['id'] = id
        self['bridge'] = bridge
        self['label'] = label
        self['subnets'] = subnets or []
        self._set_meta(kwargs)

    def add_subnet(self, subnet):
        """Append *subnet* unless it is already present."""
        if subnet in self['subnets']:
            return
        self['subnets'].append(subnet)

    @classmethod
    def hydrate(cls, network):
        """Rebuild a Network from dict form; falsy input passes through."""
        if not network:
            return network
        hydrated = cls(**network)
        hydrated['subnets'] = [Subnet.hydrate(entry)
                               for entry in hydrated['subnets']]
        return hydrated

    def __eq__(self, other):
        return all(self[key] == other[key]
                   for key in ('id', 'bridge', 'label', 'subnets'))

    def __ne__(self, other):
        return not self == other
class VIF8021QbgParams(Model):
    """Represents the parameters for a 802.1qbg VIF."""

    def __init__(self, managerid, typeid, typeidversion, instanceid):
        super(VIF8021QbgParams, self).__init__()
        # All four parameters are required; store them under like-named keys.
        self.update(managerid=managerid, typeid=typeid,
                    typeidversion=typeidversion, instanceid=instanceid)
class VIF8021QbhParams(Model):
    """Represents the parameters for a 802.1qbh VIF."""

    def __init__(self, profileid):
        super(VIF8021QbhParams, self).__init__()
        self.update(profileid=profileid)
class VIF(Model):
    """Represents a Virtual Interface in Nova.

    A VIF bundles a port's identity (id/MAC), its attached Network model,
    the plugging mechanism (type/details/devname) and scheduling-relevant
    attributes (vnic_type, profile).
    """
    def __init__(self, id=None, address=None, network=None, type=None,
                 details=None, devname=None, ovs_interfaceid=None,
                 qbh_params=None, qbg_params=None, active=False,
                 vnic_type=VNIC_TYPE_NORMAL, profile=None,
                 preserve_on_delete=False, **kwargs):
        """Build a VIF model.

        :param id: port identifier
        :param address: MAC address string
        :param network: Network model the VIF attaches to, or None
        :param type: one of the VIF_TYPE_* constants
        :param details: dict of VIF_DETAILS_* keys from the network backend
        :param active: whether the backend reports the port as up
        :param preserve_on_delete: don't delete the port on instance delete
        """
        super(VIF, self).__init__()
        self['id'] = id
        self['address'] = address
        # `network or None` normalizes any falsy value (e.g. {}) to None.
        self['network'] = network or None
        self['type'] = type
        self['details'] = details or {}
        self['devname'] = devname
        self['ovs_interfaceid'] = ovs_interfaceid
        self['qbh_params'] = qbh_params
        self['qbg_params'] = qbg_params
        self['active'] = active
        self['vnic_type'] = vnic_type
        self['profile'] = profile
        self['preserve_on_delete'] = preserve_on_delete
        self._set_meta(kwargs)
    def __eq__(self, other):
        keys = ['id', 'address', 'network', 'vnic_type',
                'type', 'profile', 'details', 'devname',
                'ovs_interfaceid', 'qbh_params', 'qbg_params',
                'active', 'preserve_on_delete']
        return all(self[k] == other[k] for k in keys)
    def __ne__(self, other):
        return not self.__eq__(other)
    def fixed_ips(self):
        """Return every fixed IP across all of this VIF's subnets."""
        if self['network']:
            return [fixed_ip for subnet in self['network']['subnets']
                    for fixed_ip in subnet['ips']]
        else:
            return []
    def floating_ips(self):
        """Return every floating IP attached to this VIF's fixed IPs."""
        return [floating_ip for fixed_ip in self.fixed_ips()
                for floating_ip in fixed_ip['floating_ips']]
    def labeled_ips(self):
        """Returns the list of all IPs

        The return value looks like this flat structure::

            {'network_label': 'my_network',
             'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
             'ips': [{'address': '123.123.123.123',
                      'version': 4,
                      'type: 'fixed',
                      'meta': {...}},
                     {'address': '124.124.124.124',
                      'version': 4,
                      'type': 'floating',
                      'meta': {...}},
                     {'address': 'fe80::4',
                      'version': 6,
                      'type': 'fixed',
                      'meta': {...}}]

        NOTE(review): returns an empty *list* (not a dict) when there is no
        network attached -- callers must handle both shapes.
        """
        if self['network']:
            # remove unnecessary fields on fixed_ips
            ips = [IP(**ip) for ip in self.fixed_ips()]
            for ip in ips:
                # remove floating ips from IP, since this is a flat structure
                # of all IPs
                del ip['meta']['floating_ips']
            # add floating ips to list (if any)
            ips.extend(self.floating_ips())
            return {'network_label': self['network']['label'],
                    'network_id': self['network']['id'],
                    'ips': ips}
        return []
    def is_hybrid_plug_enabled(self):
        # Backend-provided detail; absent means no hybrid (bridge) plug.
        return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False)
    def is_neutron_filtering_enabled(self):
        # True when Neutron applies port filtering (security groups) itself.
        return self['details'].get(VIF_DETAILS_PORT_FILTER, False)
    def get_physical_network(self):
        """Return the physical network name, preferring network metadata.

        NOTE(review): assumes self['network'] is not None -- confirm callers
        only invoke this on VIFs with an attached network.
        """
        phy_network = self['network']['meta'].get('physical_network')
        if not phy_network:
            phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
        return phy_network
    @classmethod
    def hydrate(cls, vif):
        """Rebuild a VIF (and its nested Network) from dict form."""
        vif = cls(**vif)
        vif['network'] = Network.hydrate(vif['network'])
        return vif
def get_netmask(ip, subnet):
    """Returns the netmask appropriate for injection into a guest.

    For IPv4 this is the dotted-quad netmask string (e.g. '255.255.255.0');
    for IPv6 it is the integer prefix length, which is the form guest
    network-config formats expect.

    :param ip: IP model (only its 'version' key is consulted)
    :param subnet: Subnet model providing ``as_netaddr()``
    """
    # Convert once; both branches need the netaddr.IPNetwork.
    cidr = subnet.as_netaddr()
    if ip['version'] == 4:
        return str(cidr.netmask)
    # Use the public `prefixlen` property instead of the private
    # `_prefixlen` attribute; they report the same value.
    return cidr.prefixlen
class NetworkInfo(list):
    """Stores and manipulates network information for a Nova instance."""

    # A NetworkInfo is simply a list of VIF models.
    def fixed_ips(self):
        """Returns all fixed_ips without floating_ips attached."""
        ips = []
        for vif in self:
            ips.extend(vif.fixed_ips())
        return ips

    def floating_ips(self):
        """Returns all floating_ips."""
        ips = []
        for vif in self:
            ips.extend(vif.floating_ips())
        return ips

    @classmethod
    def hydrate(cls, network_info):
        """Build a NetworkInfo from a list of VIF dicts or its JSON form."""
        if isinstance(network_info, six.string_types):
            network_info = jsonutils.loads(network_info)
        return cls(VIF.hydrate(vif) for vif in network_info)

    def wait(self, do_raise=True):
        """Wait for asynchronous call to finish."""
        # There is no asynchronous call for this class, so this is a no-op
        # here, but subclasses may override to provide asynchronous
        # capabilities. Must be defined here in the parent class so that code
        # which works with both parent and subclass types can reference this
        # method.
        pass

    def json(self):
        """Return this model serialized as JSON."""
        return jsonutils.dumps(self)
class NetworkInfoAsyncWrapper(NetworkInfo):
    """Wrapper around NetworkInfo that allows retrieving NetworkInfo
    in an async manner.

    This allows one to start querying for network information before
    you know you will need it. If you have a long-running
    operation, this allows the network model retrieval to occur in the
    background. When you need the data, it will ensure the async
    operation has completed.

    As an example::

        def allocate_net_info(arg1, arg2)
            return call_neutron_to_allocate(arg1, arg2)

        network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
        [do a long running operation -- real network_info will be retrieved
        in the background]
        [do something with network_info]
    """
    def __init__(self, async_method, *args, **kwargs):
        super(NetworkInfoAsyncWrapper, self).__init__()
        # Kick off retrieval immediately in a background (green) thread;
        # the result is collected lazily by wait().
        self._gt = utils.spawn(async_method, *args, **kwargs)
        # Rebind the data-access methods on this *instance* so every call
        # first synchronizes with the background thread.
        methods = ['json', 'fixed_ips', 'floating_ips']
        for method in methods:
            fn = getattr(self, method)
            wrapper = functools.partial(self._sync_wrapper, fn)
            functools.update_wrapper(wrapper, fn)
            setattr(self, method, wrapper)
    def _sync_wrapper(self, wrapped, *args, **kwargs):
        """Synchronize the model before running a method."""
        self.wait()
        return wrapped(*args, **kwargs)
    # Dunder methods cannot be intercepted via instance attributes, so each
    # one is overridden explicitly to synchronize before delegating.
    def __getitem__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__getitem__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __iter__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__iter__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __len__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__len__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __str__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__str__
        return self._sync_wrapper(fn, *args, **kwargs)
    def __repr__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__repr__
        return self._sync_wrapper(fn, *args, **kwargs)
    def wait(self, do_raise=True):
        """Wait for asynchronous call to finish.

        :param do_raise: when False, swallow any exception raised by the
            background call instead of propagating it.
        """
        if self._gt is not None:
            try:
                # NOTE(comstud): This looks funky, but this object is
                # subclassed from list. In other words, 'self' is really
                # just a list with a bunch of extra methods. So this
                # line just replaces the current list (which should be
                # empty) with the result.
                self[:] = self._gt.wait()
            except Exception:
                if do_raise:
                    raise
            finally:
                # Only ever wait once; subsequent calls are no-ops.
                self._gt = None
|
_replace_placeholder_with
|
Substitute *element* for this placeholder element in the shapetree.
This placeholder's `._element` attribute is set to |None| and its
original element is free for garbage collection. Any attribute access
(including a method call) on this placeholder after this call raises
|AttributeError|.
|
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """
    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")
    @height.setter
    def height(self, value):
        # Setting writes directly to this shape's XML (`cy`); the value on
        # the inherited-from placeholder is not affected.
        self._element.cy = value
    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")
    @left.setter
    def left(self, value):
        self._element.x = value
    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER
    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")
    @top.setter
    def top(self, value):
        self._element.y = value
    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")
    @width.setter
    def width(self, value):
        self._element.cx = value
    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")
    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # Read the property on the *next* class in the MRO (the concrete
        # shape class), which reports the directly-applied XML value or
        # None -- bypassing the overriding properties on this mixin.
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)
    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders on slides.

    Provides common behaviors such as inherited dimensions and replacement
    of the placeholder element by inserted content (chart, picture, table).
    """
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER
    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this slide placeholder inherits from.
        Not to be confused with an instance of |BasePlaceholder|
        (necessarily).
        """
        # Matched by placeholder idx on this slide's layout.
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)
    def _replace_placeholder_with(self, element):
        """
        Substitute *element* for this placeholder element in the shapetree.
        This placeholder's `._element` attribute is set to |None| and its
        original element is free for garbage collection. Any attribute access
        (including a method call) on this placeholder after this call raises
        |AttributeError|.

        This method was missing from the class (masked out) although it is
        called by ChartPlaceholder, PicturePlaceholder, and TablePlaceholder;
        restored here.
        """
        # Move the `p:ph` element onto *element* so it takes over this
        # shape's placeholder identity (idx/type).
        element._nvXxPr.nvPr._insert_ph(self._element.ph)
        # Splice *element* into the tree where this shape's element was,
        # then detach this shape from its element.
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.

    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """
    # All four properties simply delegate to the underlying `p:sp` element.
    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        return self._sp.ph_idx
    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient
    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type
    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """
    @property
    def _base_placeholder(self):
        """
        Return the master placeholder this layout placeholder inherits from.
        """
        # Most content placeholder types collapse to the master's BODY
        # placeholder; title variants map to TITLE, and date/footer/
        # slide-number map to their like-named master placeholders.
        # NOTE(review): a ph_type absent from this mapping raises KeyError.
        base_ph_type = {
            PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE,
            PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE,
            PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER,
            PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER,
            PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE,
        }[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.

    Master placeholders are the root of the placeholder inheritance chain;
    layout placeholders inherit their properties from these.
    """
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a notes slide. Inherits shape properties from the
    placeholder on the notes master that has the same type (e.g. 'body').
    """
    @property
    def _base_placeholder(self):
        """
        Return the notes master placeholder this notes slide placeholder
        inherits from, or |None| if no placeholder of the matching type is
        present.
        """
        # Notes placeholders are matched by placeholder *type*, not by idx.
        notes_master = self.part.notes_master
        ph_type = self.element.ph_type
        return notes_master.placeholders.get(ph_type=ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.

    All behavior is provided by |_BaseSlidePlaceholder|; this subclass
    exists to give generic slide placeholders a distinct type.
    """
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""
    def insert_chart(self, chart_type, chart_data):
        """
        Return a |PlaceholderGraphicFrame| object containing a new chart of
        *chart_type* depicting *chart_data* and having the same position and
        size as this placeholder. *chart_type* is one of the
        :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|
        object populated with the categories and series values for the chart.
        Note that the new |Chart| object is not returned directly. The chart
        object may be accessed using the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        # Size the graphic frame from this placeholder's effective
        # (possibly layout-inherited) position and extents.
        graphicFrame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        # The graphic frame replaces this placeholder in the shape tree;
        # this placeholder object is unusable afterwards.
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)
    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """
        Return a newly created `p:graphicFrame` element having the specified
        position and size and containing the chart identified by *rId*.
        """
        id_, name = self.shape_id, self.name
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            id_, name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""
    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object. The image is
        cropped to fill the entire space of the placeholder. A |PlaceholderPicture|
        object has all the properties and methods of a |Picture| shape except that the
        value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is
        `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
        """
        pic = self._new_placeholder_pic(image_file)
        # The picture replaces this placeholder in the shape tree; this
        # placeholder object is unusable afterwards.
        self._replace_placeholder_with(pic)
        return PlaceholderPicture(pic, self._parent)
    def _new_placeholder_pic(self, image_file):
        """
        Return a new `p:pic` element depicting the image in *image_file*,
        suitable for use as a placeholder. In particular this means not
        having an `a:xfrm` element, allowing its extents to be inherited from
        its layout placeholder.
        """
        rId, desc, image_size = self._get_or_add_image(image_file)
        shape_id, name = self.shape_id, self.name
        pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId)
        # Crop so the image fills this placeholder's effective (possibly
        # layout-inherited) extents.
        pic.crop_to_fit(image_size, (self.width, self.height))
        return pic
    def _get_or_add_image(self, image_file):
        """
        Return an (rId, description, image_size) 3-tuple identifying the
        related image part containing *image_file* and describing the image.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        desc, image_size = image_part.desc, image_part._px_size
        return rId, desc, image_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.

    Returned by the ``insert_chart()`` / ``insert_table()`` placeholder
    methods after the content has replaced the empty placeholder.
    """
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """
    Placeholder shape populated with a picture.
    """
    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this picture placeholder inherits from.
        """
        # Matched by placeholder idx on this slide's layout.
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""
    def insert_table(self, rows, cols):
        """Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.

        The position and width of the table are those of the placeholder and its height
        is proportional to the number of rows. A |PlaceholderGraphicFrame| object has
        all the properties and methods of a |GraphicFrame| shape except that the value
        of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally
        `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table
        but rather *contains* the new table. The table can be accessed using the
        :attr:`~.PlaceholderGraphicFrame.table` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        graphicFrame = self._new_placeholder_table(rows, cols)
        # The graphic frame replaces this placeholder in the shape tree;
        # this placeholder object is unusable afterwards.
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)
    def _new_placeholder_table(self, rows, cols):
        """
        Return a newly added `p:graphicFrame` element containing an empty
        table with *rows* rows and *cols* columns, positioned at the location
        of this placeholder and having its same width. The table's height is
        determined by the number of rows.
        """
        # 370840 EMU per row (~0.4 inch) -- presumably the default table row
        # height; TODO confirm before changing.
        shape_id, name, height = self.shape_id, self.name, Emu(rows * 370840)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            shape_id, name, rows, cols, self.left, self.top, self.width, height
        )
|
def _replace_placeholder_with(self, element):
    """
    Substitute *element* for this placeholder element in the shapetree.
    This placeholder's `._element` attribute is set to |None| and its
    original element is free for garbage collection. Any attribute access
    (including a method call) on this placeholder after this call raises
    |AttributeError|.
    """
    # Move the `p:ph` element onto *element* so it takes over this shape's
    # placeholder identity (idx/type).
    element._nvXxPr.nvPr._insert_ph(self._element.ph)
    # Splice *element* into the tree where this shape's element was, then
    # detach this shape from its element.
    self._element.addprevious(element)
    self._element.getparent().remove(self._element)
    self._element = None
| 155 | 166 |
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """
    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")
    @height.setter
    def height(self, value):
        # Setting writes directly to this shape's XML (`cy`); the value on
        # the inherited-from placeholder is not affected.
        self._element.cy = value
    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")
    @left.setter
    def left(self, value):
        self._element.x = value
    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER
    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")
    @top.setter
    def top(self, value):
        self._element.y = value
    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")
    @width.setter
    def width(self, value):
        self._element.cx = value
    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")
    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # Read the property on the *next* class in the MRO (the concrete
        # shape class), which reports the directly-applied XML value or
        # None -- bypassing the overriding properties on this mixin.
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)
    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders on slides.

    Provides common behaviors such as inherited dimensions and replacement
    of the placeholder element by inserted content (chart, picture, table).
    """
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER
    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this slide placeholder inherits from.
        Not to be confused with an instance of |BasePlaceholder|
        (necessarily).
        """
        # Matched by placeholder idx on this slide's layout.
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)
    def _replace_placeholder_with(self, element):
        """
        Substitute *element* for this placeholder element in the shapetree.
        This placeholder's `._element` attribute is set to |None| and its
        original element is free for garbage collection. Any attribute access
        (including a method call) on this placeholder after this call raises
        |AttributeError|.
        """
        # Move the `p:ph` element onto *element* so it takes over this
        # shape's placeholder identity (idx/type).
        element._nvXxPr.nvPr._insert_ph(self._element.ph)
        # Splice *element* into the tree where this shape's element was,
        # then detach this shape from its element.
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.

    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """
    # All four properties simply delegate to the underlying `p:sp` element.
    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        return self._sp.ph_idx
    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient
    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type
    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """
    @property
    def _base_placeholder(self):
        """
        Return the master placeholder this layout placeholder inherits from.
        """
        # Most content placeholder types collapse to the master's BODY
        # placeholder; title variants map to TITLE, and date/footer/
        # slide-number map to their like-named master placeholders.
        # NOTE(review): a ph_type absent from this mapping raises KeyError.
        base_ph_type = {
            PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE,
            PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE,
            PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER,
            PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER,
            PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE,
        }[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.

    Master placeholders are the root of the placeholder inheritance chain;
    layout placeholders inherit their properties from these.
    """
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a notes slide. Inherits shape properties from the
    placeholder on the notes master that has the same type (e.g. 'body').
    """
    @property
    def _base_placeholder(self):
        """
        Return the notes master placeholder this notes slide placeholder
        inherits from, or |None| if no placeholder of the matching type is
        present.
        """
        # Notes placeholders are matched by placeholder *type*, not by idx.
        notes_master = self.part.notes_master
        ph_type = self.element.ph_type
        return notes_master.placeholders.get(ph_type=ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.

    All behavior is provided by |_BaseSlidePlaceholder|; this subclass
    exists to give generic slide placeholders a distinct type.
    """
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""
    def insert_chart(self, chart_type, chart_data):
        """
        Return a |PlaceholderGraphicFrame| object containing a new chart of
        *chart_type* depicting *chart_data* and having the same position and
        size as this placeholder. *chart_type* is one of the
        :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|
        object populated with the categories and series values for the chart.
        Note that the new |Chart| object is not returned directly. The chart
        object may be accessed using the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        # Size the graphic frame from this placeholder's effective
        # (possibly layout-inherited) position and extents.
        graphicFrame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        # The graphic frame replaces this placeholder in the shape tree;
        # this placeholder object is unusable afterwards.
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)
    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """
        Return a newly created `p:graphicFrame` element having the specified
        position and size and containing the chart identified by *rId*.
        """
        id_, name = self.shape_id, self.name
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            id_, name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""

    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object. The
        image is cropped to fill the entire space of the placeholder. A
        |PlaceholderPicture| object has all the properties and methods of a
        |Picture| shape except that the value of its
        :attr:`~._BaseSlidePlaceholder.shape_type` property is
        `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
        """
        pic_element = self._new_placeholder_pic(image_file)
        self._replace_placeholder_with(pic_element)
        return PlaceholderPicture(pic_element, self._parent)

    def _new_placeholder_pic(self, image_file):
        """Return a new `p:pic` element depicting the image in *image_file*.

        The element is suitable for placeholder use; in particular it carries
        no `a:xfrm` element, allowing its extents to be inherited from its
        layout placeholder.
        """
        rId, description, image_size = self._get_or_add_image(image_file)
        pic_element = CT_Picture.new_ph_pic(
            self.shape_id, self.name, description, rId
        )
        # Crop so the image fills the placeholder's inherited extents.
        pic_element.crop_to_fit(image_size, (self.width, self.height))
        return pic_element

    def _get_or_add_image(self, image_file):
        """Return an (rId, description, image_size) 3-tuple for *image_file*.

        Identifies the related image part containing the image, creating it
        if not already present, and describes the image.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        return rId, image_part.desc, image_part._px_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.
    """

    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """
    Placeholder shape populated with a picture.
    """

    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this picture placeholder inherits from.
        """
        slide_layout = self.part.slide_layout
        return slide_layout.placeholders.get(idx=self._element.ph_idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""

    def insert_table(self, rows, cols):
        """Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.

        The table takes the position and width of this placeholder; its
        height is proportional to the number of rows. A
        |PlaceholderGraphicFrame| object has all the properties and methods
        of a |GraphicFrame| shape except that the value of its
        :attr:`~._BaseSlidePlaceholder.shape_type` property is
        unconditionally `MSO_SHAPE_TYPE.PLACEHOLDER`. Note the return value
        is not the new table itself but rather *contains* it; access the
        table via the :attr:`~.PlaceholderGraphicFrame.table` property of
        the returned object.
        """
        frame_element = self._new_placeholder_table(rows, cols)
        self._replace_placeholder_with(frame_element)
        return PlaceholderGraphicFrame(frame_element, self._parent)

    def _new_placeholder_table(self, rows, cols):
        """Return a new `p:graphicFrame` element holding an empty table.

        The table has *rows* rows and *cols* columns, is positioned at this
        placeholder's location, and has this placeholder's width. Its height
        is determined by the number of rows.
        """
        emu_per_row = 370840  # per-row height used to size the frame
        table_height = Emu(rows * emu_per_row)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            self.shape_id,
            self.name,
            rows,
            cols,
            self.left,
            self.top,
            self.width,
            table_height,
        )
|
insert_picture
|
Return a |PlaceholderPicture| object depicting the image in `image_file`.
`image_file` may be either a path (string) or a file-like object. The image is
cropped to fill the entire space of the placeholder. A |PlaceholderPicture|
object has all the properties and methods of a |Picture| shape except that the
value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is
`MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
|
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """

    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")

    @height.setter
    def height(self, value):
        # Writing an explicit extent overrides inheritance on later reads.
        self._element.cy = value

    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")

    @left.setter
    def left(self, value):
        self._element.x = value

    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")

    @top.setter
    def top(self, value):
        self._element.y = value

    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")

    @width.setter
    def width(self, value):
        self._element.cx = value

    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")

    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # Read the property from the next class in the MRO (this mixin is
        # always listed before the concrete shape base, e.g.
        # `_BaseSlidePlaceholder(_InheritsDimensions, Shape)`), so we get
        # the directly-applied value instead of recursing into this mixin's
        # own property.
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)

    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            # No matching base placeholder; nothing to inherit.
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders on slides.

    Provides common behaviors such as inherited dimensions.
    """

    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True

    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this slide placeholder inherits from.
        Not to be confused with an instance of |BasePlaceholder|
        (necessarily).
        """
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)

    def _replace_placeholder_with(self, element):
        """
        Substitute *element* for this placeholder element in the shapetree.
        This placeholder's `._element` attribute is set to |None| and its
        original element is free for garbage collection. Any attribute access
        (including a method call) on this placeholder after this call raises
        |AttributeError|.
        """
        # Order matters here: first copy the `p:ph` element into the new
        # shape element so it retains placeholder identity, then insert the
        # new element before this one in the tree, remove the old element,
        # and finally sever this proxy's reference to it.
        element._nvXxPr.nvPr._insert_ph(self._element.ph)
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.

    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """

    # Each property below simply forwards to the corresponding `ph_*`
    # attribute of the underlying `p:sp` oxml element.

    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        return self._sp.ph_idx

    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient

    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type

    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """

    # NOTE(review): this class is defined more than once in this file; if the
    # file were executed as a module the last definition would win.
    @property
    def _base_placeholder(self):
        """
        Return the master placeholder this layout placeholder inherits from.
        """
        # Content-style types inherit from the master BODY placeholder;
        # title- and footer-family types map to their like-typed master
        # placeholder. Unmapped ph_type values raise KeyError.
        base_ph_type = {
            PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE,
            PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE,
            PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER,
            PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER,
            PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE,
        }[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a notes slide. Inherits shape properties from the
    placeholder on the notes master that has the same type (e.g. 'body').
    """

    @property
    def _base_placeholder(self):
        """
        Return the notes master placeholder this notes slide placeholder
        inherits from, or |None| if no placeholder of the matching type is
        present.
        """
        notes_master = self.part.notes_master
        # Read the element via `_element`, the attribute used by every
        # sibling placeholder class in this module (was `self.element`,
        # which no other class here relies on).
        ph_type = self._element.ph_type
        return notes_master.placeholders.get(ph_type=ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    def insert_chart(self, chart_type, chart_data):
        """
        Return a |PlaceholderGraphicFrame| object containing a new chart of
        *chart_type* depicting *chart_data* and having the same position and
        size as this placeholder. *chart_type* is one of the
        :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|
        object populated with the categories and series values for the chart.
        Note that the new |Chart| object is not returned directly. The chart
        object may be accessed using the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        graphicFrame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)

    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """
        Return a newly created `p:graphicFrame` element having the specified
        position and size and containing the chart identified by *rId*.
        """
        id_, name = self.shape_id, self.name
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            id_, name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""

    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object. The
        image is cropped to fill the entire space of the placeholder. A
        |PlaceholderPicture| object has all the properties and methods of a
        |Picture| shape except that the value of its
        :attr:`~._BaseSlidePlaceholder.shape_type` property is
        `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
        """
        # Restored: this public entry point was missing from this copy of
        # the class (only a "MASKED" marker remained), leaving the two
        # private helpers below unreachable.
        pic = self._new_placeholder_pic(image_file)
        self._replace_placeholder_with(pic)
        return PlaceholderPicture(pic, self._parent)

    def _new_placeholder_pic(self, image_file):
        """
        Return a new `p:pic` element depicting the image in *image_file*,
        suitable for use as a placeholder. In particular this means not
        having an `a:xfrm` element, allowing its extents to be inherited from
        its layout placeholder.
        """
        rId, desc, image_size = self._get_or_add_image(image_file)
        shape_id, name = self.shape_id, self.name
        pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId)
        pic.crop_to_fit(image_size, (self.width, self.height))
        return pic

    def _get_or_add_image(self, image_file):
        """
        Return an (rId, description, image_size) 3-tuple identifying the
        related image part containing *image_file* and describing the image.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        desc, image_size = image_part.desc, image_part._px_size
        return rId, desc, image_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.

    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """
    Placeholder shape populated with a picture.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.

    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this picture placeholder inherits from.
        """
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    def insert_table(self, rows, cols):
        """Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.

        The position and width of the table are those of the placeholder and its height
        is proportional to the number of rows. A |PlaceholderGraphicFrame| object has
        all the properties and methods of a |GraphicFrame| shape except that the value
        of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally
        `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table
        but rather *contains* the new table. The table can be accessed using the
        :attr:`~.PlaceholderGraphicFrame.table` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        graphicFrame = self._new_placeholder_table(rows, cols)
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)

    def _new_placeholder_table(self, rows, cols):
        """
        Return a newly added `p:graphicFrame` element containing an empty
        table with *rows* rows and *cols* columns, positioned at the location
        of this placeholder and having its same width. The table's height is
        determined by the number of rows.
        """
        # 370840 EMU is the per-row height used to size the frame.
        shape_id, name, height = self.shape_id, self.name, Emu(rows * 370840)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            shape_id, name, rows, cols, self.left, self.top, self.width, height
        )
|
# NOTE(review): free-standing copy of PicturePlaceholder.insert_picture; it
# takes `self` but is defined at module level here, so it is not bound to
# any class as written.
def insert_picture(self, image_file):
    """Return a |PlaceholderPicture| object depicting the image in `image_file`.

    `image_file` may be either a path (string) or a file-like object. The image is
    cropped to fill the entire space of the placeholder. A |PlaceholderPicture|
    object has all the properties and methods of a |Picture| shape except that the
    value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is
    `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
    """
    pic = self._new_placeholder_pic(image_file)
    self._replace_placeholder_with(pic)
    return PlaceholderPicture(pic, self._parent)
| 310 | 321 |
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")

    @height.setter
    def height(self, value):
        self._element.cy = value

    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")

    @left.setter
    def left(self, value):
        self._element.x = value

    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")

    @top.setter
    def top(self, value):
        self._element.y = value

    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")

    @width.setter
    def width(self, value):
        self._element.cx = value

    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")

    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # super() skips this mixin's own property and reads the
        # directly-applied value from the concrete shape base class.
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)

    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders on slides.

    Provides common behaviors such as inherited dimensions.
    """

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True

    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this slide placeholder inherits from.
        Not to be confused with an instance of |BasePlaceholder|
        (necessarily).
        """
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)

    def _replace_placeholder_with(self, element):
        """
        Substitute *element* for this placeholder element in the shapetree.
        This placeholder's `._element` attribute is set to |None| and its
        original element is free for garbage collection. Any attribute access
        (including a method call) on this placeholder after this call raises
        |AttributeError|.
        """
        # Order matters: copy the `p:ph` element first, then insert, remove,
        # and finally sever this proxy's reference.
        element._nvXxPr.nvPr._insert_ph(self._element.ph)
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.

    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file. Each property forwards to the underlying `p:sp` element.
    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        return self._sp.ph_idx

    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient

    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type

    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    @property
    def _base_placeholder(self):
        """
        Return the master placeholder this layout placeholder inherits from.
        """
        # Content-style types inherit from the master BODY placeholder;
        # title- and footer-family types map to their like-typed master
        # placeholder. Unmapped ph_type values raise KeyError.
        base_ph_type = {
            PP_PLACEHOLDER.BODY: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.BITMAP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CENTER_TITLE: PP_PLACEHOLDER.TITLE,
            PP_PLACEHOLDER.ORG_CHART: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.DATE: PP_PLACEHOLDER.DATE,
            PP_PLACEHOLDER.FOOTER: PP_PLACEHOLDER.FOOTER,
            PP_PLACEHOLDER.MEDIA_CLIP: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.OBJECT: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.PICTURE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.SLIDE_NUMBER: PP_PLACEHOLDER.SLIDE_NUMBER,
            PP_PLACEHOLDER.SUBTITLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TABLE: PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.TITLE: PP_PLACEHOLDER.TITLE,
        }[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a notes slide. Inherits shape properties from the
    placeholder on the notes master that has the same type (e.g. 'body').
    """

    @property
    def _base_placeholder(self):
        """
        Return the notes master placeholder this notes slide placeholder
        inherits from, or |None| if no placeholder of the matching type is
        present.
        """
        notes_master = self.part.notes_master
        # Read the element via `_element`, the attribute used by every
        # sibling placeholder class in this module (was `self.element`,
        # which no other class here relies on).
        ph_type = self._element.ph_type
        return notes_master.placeholders.get(ph_type=ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    def insert_chart(self, chart_type, chart_data):
        """
        Return a |PlaceholderGraphicFrame| object containing a new chart of
        *chart_type* depicting *chart_data* and having the same position and
        size as this placeholder. *chart_type* is one of the
        :ref:`XlChartType` enumeration values. *chart_data* is a |ChartData|
        object populated with the categories and series values for the chart.
        Note that the new |Chart| object is not returned directly. The chart
        object may be accessed using the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        graphicFrame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)

    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """
        Return a newly created `p:graphicFrame` element having the specified
        position and size and containing the chart identified by *rId*.
        """
        id_, name = self.shape_id, self.name
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            id_, name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object. The image is
        cropped to fill the entire space of the placeholder. A |PlaceholderPicture|
        object has all the properties and methods of a |Picture| shape except that the
        value of its :attr:`~._BaseSlidePlaceholder.shape_type` property is
        `MSO_SHAPE_TYPE.PLACEHOLDER` instead of `MSO_SHAPE_TYPE.PICTURE`.
        """
        pic = self._new_placeholder_pic(image_file)
        self._replace_placeholder_with(pic)
        return PlaceholderPicture(pic, self._parent)

    def _new_placeholder_pic(self, image_file):
        """
        Return a new `p:pic` element depicting the image in *image_file*,
        suitable for use as a placeholder. In particular this means not
        having an `a:xfrm` element, allowing its extents to be inherited from
        its layout placeholder.
        """
        rId, desc, image_size = self._get_or_add_image(image_file)
        shape_id, name = self.shape_id, self.name
        pic = CT_Picture.new_ph_pic(shape_id, name, desc, rId)
        pic.crop_to_fit(image_size, (self.width, self.height))
        return pic

    def _get_or_add_image(self, image_file):
        """
        Return an (rId, description, image_size) 3-tuple identifying the
        related image part containing *image_file* and describing the image.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        desc, image_size = image_part.desc, image_part._px_size
        return rId, desc, image_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.

    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """
    Placeholder shape populated with a picture.
    """
    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.

    @property
    def _base_placeholder(self):
        """
        Return the layout placeholder this picture placeholder inherits from.
        """
        layout, idx = self.part.slide_layout, self._element.ph_idx
        return layout.placeholders.get(idx=idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    def insert_table(self, rows, cols):
        """Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.

        The position and width of the table are those of the placeholder and its height
        is proportional to the number of rows. A |PlaceholderGraphicFrame| object has
        all the properties and methods of a |GraphicFrame| shape except that the value
        of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally
        `MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table
        but rather *contains* the new table. The table can be accessed using the
        :attr:`~.PlaceholderGraphicFrame.table` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        graphicFrame = self._new_placeholder_table(rows, cols)
        self._replace_placeholder_with(graphicFrame)
        return PlaceholderGraphicFrame(graphicFrame, self._parent)

    def _new_placeholder_table(self, rows, cols):
        """
        Return a newly added `p:graphicFrame` element containing an empty
        table with *rows* rows and *cols* columns, positioned at the location
        of this placeholder and having its same width. The table's height is
        determined by the number of rows.
        """
        # 370840 EMU is the per-row height used to size the frame.
        shape_id, name, height = self.shape_id, self.name, Emu(rows * 370840)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            shape_id, name, rows, cols, self.left, self.top, self.width, height
        )
|
insert_table
|
Return |PlaceholderGraphicFrame| object containing a `rows` by `cols` table.
The position and width of the table are those of the placeholder and its height
is proportional to the number of rows. A |PlaceholderGraphicFrame| object has
all the properties and methods of a |GraphicFrame| shape except that the value
of its :attr:`~._BaseSlidePlaceholder.shape_type` property is unconditionally
`MSO_SHAPE_TYPE.PLACEHOLDER`. Note that the return value is not the new table
but rather *contains* the new table. The table can be accessed using the
:attr:`~.PlaceholderGraphicFrame.table` property of the returned
|PlaceholderGraphicFrame| object.
|
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """

    # NOTE(review): duplicate definition; an identical class appears earlier
    # in this file.
    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")

    @height.setter
    def height(self, value):
        self._element.cy = value

    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")

    @left.setter
    def left(self, value):
        self._element.x = value

    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")

    @top.setter
    def top(self, value):
        self._element.y = value

    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")

    @width.setter
    def width(self, value):
        self._element.cx = value

    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")

    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # super() skips this mixin's own property and reads the
        # directly-applied value from the concrete shape base class.
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)

    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders appearing on a slide.

    Adds the slide-specific lookup of the layout placeholder to inherit
    dimensions from, plus the machinery for swapping the placeholder's XML
    element for a content element (picture, chart, table).
    """

    @property
    def is_placeholder(self):
        """|True| unconditionally; this shape is always a placeholder."""
        return True

    @property
    def shape_type(self):
        """Member of :ref:`MsoShapeType`; always ``MSO_SHAPE_TYPE.PLACEHOLDER``."""
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def _base_placeholder(self):
        """The layout placeholder this slide placeholder inherits from.

        Matched by placeholder `idx` value on the owning slide's layout.
        """
        slide_layout = self.part.slide_layout
        ph_idx = self._element.ph_idx
        return slide_layout.placeholders.get(idx=ph_idx)

    def _replace_placeholder_with(self, element):
        """Substitute *element* for this placeholder element in the shapetree.

        The `p:ph` child is moved onto *element* so it remains a placeholder.
        This object's `._element` is set to |None|, so any subsequent
        attribute access (including method calls) raises |AttributeError|;
        the original element becomes garbage-collectable.
        """
        ph = self._element.ph
        element._nvXxPr.nvPr._insert_ph(ph)
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.
    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """
    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        # All four properties below delegate to the `p:ph` element within
        # the shape's XML (via the `_sp` oxml accessor).
        return self._sp.ph_idx
    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient
    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type
    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """

    @property
    def _base_placeholder(self):
        """Return the master placeholder this layout placeholder inherits from."""
        # Most layout placeholder types inherit from the master BODY
        # placeholder; the remaining few map to the master placeholder of
        # the same (or closely-related) type. An unmapped ph_type raises
        # KeyError, matching the explicit-mapping contract.
        body_inheritors = (
            PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART,
            PP_PLACEHOLDER.BITMAP,
            PP_PLACEHOLDER.ORG_CHART,
            PP_PLACEHOLDER.MEDIA_CLIP,
            PP_PLACEHOLDER.OBJECT,
            PP_PLACEHOLDER.PICTURE,
            PP_PLACEHOLDER.SUBTITLE,
            PP_PLACEHOLDER.TABLE,
        )
        mapping = dict.fromkeys(body_inheritors, PP_PLACEHOLDER.BODY)
        mapping[PP_PLACEHOLDER.TITLE] = PP_PLACEHOLDER.TITLE
        mapping[PP_PLACEHOLDER.CENTER_TITLE] = PP_PLACEHOLDER.TITLE
        mapping[PP_PLACEHOLDER.DATE] = PP_PLACEHOLDER.DATE
        mapping[PP_PLACEHOLDER.FOOTER] = PP_PLACEHOLDER.FOOTER
        mapping[PP_PLACEHOLDER.SLIDE_NUMBER] = PP_PLACEHOLDER.SLIDE_NUMBER
        base_ph_type = mapping[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.
    """
    # No behavior beyond BasePlaceholder is added here.
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """Placeholder shape on a notes slide.

    Inherits shape properties from the placeholder on the notes master
    having the same type (e.g. 'body').
    """

    @property
    def _base_placeholder(self):
        """Notes-master placeholder this placeholder inherits from.

        |None| when no placeholder of the matching type is present.
        """
        placeholders = self.part.notes_master.placeholders
        return placeholders.get(ph_type=self.element.ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.
    """
    # All behavior (inherited dimensions, unconditional placeholder
    # shape_type) is provided by _BaseSlidePlaceholder.
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""

    def insert_chart(self, chart_type, chart_data):
        """Return |PlaceholderGraphicFrame| containing a new chart.

        The chart is of *chart_type* (an :ref:`XlChartType` member), depicts
        *chart_data* (a |ChartData| object holding categories and series
        values), and takes this placeholder's position and size. The new
        |Chart| object is not returned directly; access it via the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        graphic_frame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        self._replace_placeholder_with(graphic_frame)
        return PlaceholderGraphicFrame(graphic_frame, self._parent)

    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """Return a new `p:graphicFrame` element for the chart at *rId*.

        The frame has the specified position (*x*, *y*) and size (*cx*, *cy*)
        and carries over this shape's id and name.
        """
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            self.shape_id, self.name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""

    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object.
        The image is cropped to fill the entire space of the placeholder.
        A |PlaceholderPicture| behaves like a |Picture| shape except its
        :attr:`~._BaseSlidePlaceholder.shape_type` is
        `MSO_SHAPE_TYPE.PLACEHOLDER` rather than `MSO_SHAPE_TYPE.PICTURE`.
        """
        pic = self._new_placeholder_pic(image_file)
        self._replace_placeholder_with(pic)
        return PlaceholderPicture(pic, self._parent)

    def _new_placeholder_pic(self, image_file):
        """Return a new `p:pic` element suitable for use as a placeholder.

        In particular this means having no `a:xfrm` element, allowing its
        extents to be inherited from its layout placeholder.
        """
        rId, desc, image_size = self._get_or_add_image(image_file)
        pic = CT_Picture.new_ph_pic(self.shape_id, self.name, desc, rId)
        pic.crop_to_fit(image_size, (self.width, self.height))
        return pic

    def _get_or_add_image(self, image_file):
        """Return (rId, description, image_size) for the image in *image_file*.

        The image part is created in the package if not already present.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        return rId, image_part.desc, image_part._px_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.
    """
    # The inserted content is reached through GraphicFrame's own accessors
    # (e.g. `.table`, `.chart`).
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """Placeholder shape populated with a picture."""

    @property
    def _base_placeholder(self):
        """Layout placeholder this picture placeholder inherits from."""
        slide_layout = self.part.slide_layout
        return slide_layout.placeholders.get(idx=self._element.ph_idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""
    # MASKED: insert_table function (lines 377-391)
    def _new_placeholder_table(self, rows, cols):
        """
        Return a newly added `p:graphicFrame` element containing an empty
        table with *rows* rows and *cols* columns, positioned at the location
        of this placeholder and having its same width. The table's height is
        determined by the number of rows.
        """
        # 370840 EMU per row — presumably PowerPoint's default table row
        # height; TODO confirm against the OOXML defaults.
        shape_id, name, height = self.shape_id, self.name, Emu(rows * 370840)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            shape_id, name, rows, cols, self.left, self.top, self.width, height
        )
|
def insert_table(self, rows, cols):
    """Return |PlaceholderGraphicFrame| holding a new `rows` by `cols` table.

    The table takes the placeholder's position and width; its height is
    proportional to the number of rows. The returned object has all the
    properties and methods of a |GraphicFrame| shape except that its
    :attr:`~._BaseSlidePlaceholder.shape_type` is unconditionally
    `MSO_SHAPE_TYPE.PLACEHOLDER`. The return value is not the new table
    itself; the table is available on the returned object's
    :attr:`~.PlaceholderGraphicFrame.table` property.
    """
    frame = self._new_placeholder_table(rows, cols)
    self._replace_placeholder_with(frame)
    return PlaceholderGraphicFrame(frame, self._parent)
| 377 | 391 |
# encoding: utf-8
"""Placeholder-related objects.
Specific to shapes having a `p:ph` element. A placeholder has distinct behaviors
depending on whether it appears on a slide, layout, or master. Hence there is a
non-trivial class inheritance structure.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE, PP_PLACEHOLDER
from pptx.oxml.shapes.graphfrm import CT_GraphicalObjectFrame
from pptx.oxml.shapes.picture import CT_Picture
from pptx.shapes.autoshape import Shape
from pptx.shapes.graphfrm import GraphicFrame
from pptx.shapes.picture import Picture
from pptx.util import Emu
class _InheritsDimensions(object):
    """
    Mixin class that provides inherited dimension behavior. Specifically,
    left, top, width, and height report the value from the layout placeholder
    where they would have otherwise reported |None|. This behavior is
    distinctive to placeholders. :meth:`_base_placeholder` must be overridden
    by all subclasses to provide lookup of the appropriate base placeholder
    to inherit from.
    """
    @property
    def height(self):
        """
        The effective height of this placeholder shape; its directly-applied
        height if it has one, otherwise the height of its parent layout
        placeholder.
        """
        return self._effective_value("height")
    @height.setter
    def height(self, value):
        # Writes the `cy` (vertical extent) attribute directly on the shape's
        # XML element, making the value directly-applied from then on.
        self._element.cy = value
    @property
    def left(self):
        """
        The effective left of this placeholder shape; its directly-applied
        left if it has one, otherwise the left of its parent layout
        placeholder.
        """
        return self._effective_value("left")
    @left.setter
    def left(self, value):
        # `x` is the horizontal-offset attribute on the shape's XML element.
        self._element.x = value
    @property
    def shape_type(self):
        """
        Member of :ref:`MsoShapeType` specifying the type of this shape.
        Unconditionally ``MSO_SHAPE_TYPE.PLACEHOLDER`` in this case.
        Read-only.
        """
        return MSO_SHAPE_TYPE.PLACEHOLDER
    @property
    def top(self):
        """
        The effective top of this placeholder shape; its directly-applied
        top if it has one, otherwise the top of its parent layout
        placeholder.
        """
        return self._effective_value("top")
    @top.setter
    def top(self, value):
        # `y` is the vertical-offset attribute on the shape's XML element.
        self._element.y = value
    @property
    def width(self):
        """
        The effective width of this placeholder shape; its directly-applied
        width if it has one, otherwise the width of its parent layout
        placeholder.
        """
        return self._effective_value("width")
    @width.setter
    def width(self, value):
        # `cx` is the horizontal-extent attribute on the shape's XML element.
        self._element.cx = value
    @property
    def _base_placeholder(self):
        """
        Return the layout or master placeholder shape this placeholder
        inherits from. Not to be confused with an instance of
        |BasePlaceholder| (necessarily).
        """
        raise NotImplementedError("Must be implemented by all subclasses.")
    def _effective_value(self, attr_name):
        """
        The effective value of *attr_name* on this placeholder shape; its
        directly-applied value if it has one, otherwise the value on the
        layout placeholder it inherits from.
        """
        # super() deliberately skips this mixin in the MRO so the lookup
        # reaches the concrete shape class's property, which reports the
        # directly-applied value (or None when there is none).
        directly_applied_value = getattr(super(_InheritsDimensions, self), attr_name)
        if directly_applied_value is not None:
            return directly_applied_value
        return self._inherited_value(attr_name)
    def _inherited_value(self, attr_name):
        """
        Return the attribute value, e.g. 'width' of the base placeholder this
        placeholder inherits from.
        """
        base_placeholder = self._base_placeholder
        if base_placeholder is None:
            # No matching base placeholder exists; nothing to inherit.
            return None
        inherited_value = getattr(base_placeholder, attr_name)
        return inherited_value
class _BaseSlidePlaceholder(_InheritsDimensions, Shape):
    """Base class for placeholders appearing on a slide.

    Adds the slide-specific lookup of the layout placeholder to inherit
    dimensions from, plus the machinery for swapping the placeholder's XML
    element for a content element (picture, chart, table).
    """

    @property
    def is_placeholder(self):
        """|True| unconditionally; this shape is always a placeholder."""
        return True

    @property
    def shape_type(self):
        """Member of :ref:`MsoShapeType`; always ``MSO_SHAPE_TYPE.PLACEHOLDER``."""
        return MSO_SHAPE_TYPE.PLACEHOLDER

    @property
    def _base_placeholder(self):
        """The layout placeholder this slide placeholder inherits from.

        Matched by placeholder `idx` value on the owning slide's layout.
        """
        slide_layout = self.part.slide_layout
        ph_idx = self._element.ph_idx
        return slide_layout.placeholders.get(idx=ph_idx)

    def _replace_placeholder_with(self, element):
        """Substitute *element* for this placeholder element in the shapetree.

        The `p:ph` child is moved onto *element* so it remains a placeholder.
        This object's `._element` is set to |None|, so any subsequent
        attribute access (including method calls) raises |AttributeError|;
        the original element becomes garbage-collectable.
        """
        ph = self._element.ph
        element._nvXxPr.nvPr._insert_ph(ph)
        self._element.addprevious(element)
        self._element.getparent().remove(self._element)
        self._element = None
class BasePlaceholder(Shape):
    """
    NOTE: This class is deprecated and will be removed from a future release
    along with the properties *idx*, *orient*, *ph_type*, and *sz*. The *idx*
    property will be available via the .placeholder_format property. The
    others will be accessed directly from the oxml layer as they are only
    used for internal purposes.
    Base class for placeholder subclasses that differentiate the varying
    behaviors of placeholders on a master, layout, and slide.
    """
    @property
    def idx(self):
        """
        Integer placeholder 'idx' attribute, e.g. 0
        """
        # All four properties below delegate to the `p:ph` element within
        # the shape's XML (via the `_sp` oxml accessor).
        return self._sp.ph_idx
    @property
    def orient(self):
        """
        Placeholder orientation, e.g. ST_Direction.HORZ
        """
        return self._sp.ph_orient
    @property
    def ph_type(self):
        """
        Placeholder type, e.g. PP_PLACEHOLDER.CENTER_TITLE
        """
        return self._sp.ph_type
    @property
    def sz(self):
        """
        Placeholder 'sz' attribute, e.g. ST_PlaceholderSize.FULL
        """
        return self._sp.ph_sz
class LayoutPlaceholder(_InheritsDimensions, Shape):
    """
    Placeholder shape on a slide layout, providing differentiated behavior
    for slide layout placeholders, in particular, inheriting shape properties
    from the master placeholder having the same type, when a matching one
    exists.
    """

    @property
    def _base_placeholder(self):
        """Return the master placeholder this layout placeholder inherits from."""
        # Most layout placeholder types inherit from the master BODY
        # placeholder; the remaining few map to the master placeholder of
        # the same (or closely-related) type. An unmapped ph_type raises
        # KeyError, matching the explicit-mapping contract.
        body_inheritors = (
            PP_PLACEHOLDER.BODY,
            PP_PLACEHOLDER.CHART,
            PP_PLACEHOLDER.BITMAP,
            PP_PLACEHOLDER.ORG_CHART,
            PP_PLACEHOLDER.MEDIA_CLIP,
            PP_PLACEHOLDER.OBJECT,
            PP_PLACEHOLDER.PICTURE,
            PP_PLACEHOLDER.SUBTITLE,
            PP_PLACEHOLDER.TABLE,
        )
        mapping = dict.fromkeys(body_inheritors, PP_PLACEHOLDER.BODY)
        mapping[PP_PLACEHOLDER.TITLE] = PP_PLACEHOLDER.TITLE
        mapping[PP_PLACEHOLDER.CENTER_TITLE] = PP_PLACEHOLDER.TITLE
        mapping[PP_PLACEHOLDER.DATE] = PP_PLACEHOLDER.DATE
        mapping[PP_PLACEHOLDER.FOOTER] = PP_PLACEHOLDER.FOOTER
        mapping[PP_PLACEHOLDER.SLIDE_NUMBER] = PP_PLACEHOLDER.SLIDE_NUMBER
        base_ph_type = mapping[self._element.ph_type]
        slide_master = self.part.slide_master
        return slide_master.placeholders.get(base_ph_type, None)
class MasterPlaceholder(BasePlaceholder):
    """
    Placeholder shape on a slide master.
    """
    # No behavior beyond BasePlaceholder is added here.
class NotesSlidePlaceholder(_InheritsDimensions, Shape):
    """Placeholder shape on a notes slide.

    Inherits shape properties from the placeholder on the notes master
    having the same type (e.g. 'body').
    """

    @property
    def _base_placeholder(self):
        """Notes-master placeholder this placeholder inherits from.

        |None| when no placeholder of the matching type is present.
        """
        placeholders = self.part.notes_master.placeholders
        return placeholders.get(ph_type=self.element.ph_type)
class SlidePlaceholder(_BaseSlidePlaceholder):
    """
    Placeholder shape on a slide. Inherits shape properties from its
    corresponding slide layout placeholder.
    """
    # All behavior (inherited dimensions, unconditional placeholder
    # shape_type) is provided by _BaseSlidePlaceholder.
class ChartPlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a chart."""

    def insert_chart(self, chart_type, chart_data):
        """Return |PlaceholderGraphicFrame| containing a new chart.

        The chart is of *chart_type* (an :ref:`XlChartType` member), depicts
        *chart_data* (a |ChartData| object holding categories and series
        values), and takes this placeholder's position and size. The new
        |Chart| object is not returned directly; access it via the
        :attr:`~.PlaceholderGraphicFrame.chart` property of the returned
        |PlaceholderGraphicFrame| object.
        """
        rId = self.part.add_chart_part(chart_type, chart_data)
        graphic_frame = self._new_chart_graphicFrame(
            rId, self.left, self.top, self.width, self.height
        )
        self._replace_placeholder_with(graphic_frame)
        return PlaceholderGraphicFrame(graphic_frame, self._parent)

    def _new_chart_graphicFrame(self, rId, x, y, cx, cy):
        """Return a new `p:graphicFrame` element for the chart at *rId*.

        The frame has the specified position (*x*, *y*) and size (*cx*, *cy*)
        and carries over this shape's id and name.
        """
        return CT_GraphicalObjectFrame.new_chart_graphicFrame(
            self.shape_id, self.name, rId, x, y, cx, cy
        )
class PicturePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a picture."""

    def insert_picture(self, image_file):
        """Return a |PlaceholderPicture| object depicting the image in `image_file`.

        `image_file` may be either a path (string) or a file-like object.
        The image is cropped to fill the entire space of the placeholder.
        A |PlaceholderPicture| behaves like a |Picture| shape except its
        :attr:`~._BaseSlidePlaceholder.shape_type` is
        `MSO_SHAPE_TYPE.PLACEHOLDER` rather than `MSO_SHAPE_TYPE.PICTURE`.
        """
        pic = self._new_placeholder_pic(image_file)
        self._replace_placeholder_with(pic)
        return PlaceholderPicture(pic, self._parent)

    def _new_placeholder_pic(self, image_file):
        """Return a new `p:pic` element suitable for use as a placeholder.

        In particular this means having no `a:xfrm` element, allowing its
        extents to be inherited from its layout placeholder.
        """
        rId, desc, image_size = self._get_or_add_image(image_file)
        pic = CT_Picture.new_ph_pic(self.shape_id, self.name, desc, rId)
        pic.crop_to_fit(image_size, (self.width, self.height))
        return pic

    def _get_or_add_image(self, image_file):
        """Return (rId, description, image_size) for the image in *image_file*.

        The image part is created in the package if not already present.
        """
        image_part, rId = self.part.get_or_add_image_part(image_file)
        return rId, image_part.desc, image_part._px_size
class PlaceholderGraphicFrame(GraphicFrame):
    """
    Placeholder shape populated with a table, chart, or smart art.
    """
    # The inserted content is reached through GraphicFrame's own accessors
    # (e.g. `.table`, `.chart`).
    @property
    def is_placeholder(self):
        """
        Boolean indicating whether this shape is a placeholder.
        Unconditionally |True| in this case.
        """
        return True
class PlaceholderPicture(_InheritsDimensions, Picture):
    """Placeholder shape populated with a picture."""

    @property
    def _base_placeholder(self):
        """Layout placeholder this picture placeholder inherits from."""
        slide_layout = self.part.slide_layout
        return slide_layout.placeholders.get(idx=self._element.ph_idx)
class TablePlaceholder(_BaseSlidePlaceholder):
    """Placeholder shape that can only accept a table."""

    def insert_table(self, rows, cols):
        """Return |PlaceholderGraphicFrame| holding a new `rows` by `cols` table.

        The table takes the placeholder's position and width; its height is
        proportional to the number of rows. The returned object has all the
        properties and methods of a |GraphicFrame| shape except that its
        :attr:`~._BaseSlidePlaceholder.shape_type` is unconditionally
        `MSO_SHAPE_TYPE.PLACEHOLDER`. The return value is not the new table
        itself; the table is available on the returned object's
        :attr:`~.PlaceholderGraphicFrame.table` property.
        """
        frame = self._new_placeholder_table(rows, cols)
        self._replace_placeholder_with(frame)
        return PlaceholderGraphicFrame(frame, self._parent)

    def _new_placeholder_table(self, rows, cols):
        """Return a new `p:graphicFrame` element holding an empty table.

        The table has *rows* rows and *cols* columns, is positioned at this
        placeholder's location, and has its same width; height scales with
        the row count (370840 EMU per row).
        """
        height = Emu(rows * 370840)
        return CT_GraphicalObjectFrame.new_table_graphicFrame(
            self.shape_id, self.name, rows, cols, self.left, self.top,
            self.width, height
        )
|
ensemble_negative_log_likelihood
|
Negative log-likelihood for ensemble.
For each datapoint (x,y), the ensemble's negative log-likelihood is:
```
-log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
log ensemble_size.
```
Args:
labels: tf.Tensor of shape [...].
logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
Returns:
tf.Tensor of shape [...].
|
# coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble on ImageNet.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `deterministic.py` over different
seeds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import deterministic_model # local file import
import utils # local file import
import numpy as np
import tensorflow.compat.v2 as tf
# Data, checkpoint, and output-location flags for ensemble evaluation.
flags.DEFINE_integer('per_core_batch_size', 512, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
flags.DEFINE_string('checkpoint_dir', None,
                    'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
                    'The directory where to save predictions.')
flags.DEFINE_string('alexnet_errors_path', None,
                    'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')
# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
NUM_CLASSES = 1000
# MASKED: ensemble_negative_log_likelihood function (lines 64-87)
def gibbs_cross_entropy(labels, logits):
  """Average cross entropy for ensemble members (Gibbs cross entropy).
  For each datapoint (x,y), the ensemble's Gibbs cross entropy is:
  ```
  GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).
  ```
  The Gibbs cross entropy approximates the average cross entropy of a single
  model drawn from the (Gibbs) ensemble.
  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].
  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.cast(labels, tf.int32)
  logits = tf.convert_to_tensor(logits)
  # Broadcast labels across the leading ensemble axis so every member is
  # scored against the same targets, then average the members' per-example
  # cross entropies.
  member_label_shape = tf.shape(logits)[:-1]
  broadcast_labels = tf.broadcast_to(labels[tf.newaxis, ...],
                                     member_label_shape)
  per_member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
      broadcast_labels, logits)
  return tf.reduce_mean(per_member_nll, axis=0)
def main(argv):
  """Evaluate an ensemble of pre-trained checkpoints on ImageNet(-C).

  Phase 1 restores each checkpoint found under --checkpoint_dir and caches
  its logits for every test dataset as .npy files in --output_dir. Phase 2
  reloads the cached logits, forms the ensemble prediction, and reports
  NLL, Gibbs cross entropy, accuracy, and ECE.
  """
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.enable_v2_behavior()
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size
  dataset_test = utils.ImageNetInput(
      is_training=False,
      data_dir=FLAGS.data_dir,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=False).input_fn()
  test_datasets = {'clean': dataset_test}
  # One additional test dataset per (corruption type, intensity) pair.
  corruption_types, max_intensity = utils.load_corrupted_test_info()
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
          name=name,
          intensity=intensity,
          batch_size=FLAGS.per_core_batch_size,
          drop_remainder=True,
          use_bfloat16=False)
  model = deterministic_model.resnet50(input_shape=(224, 224, 3),
                                       num_classes=NUM_CLASSES)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)
  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      # Skip datasets whose logits were cached by an earlier run.
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))
        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)
  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(
        name)] = ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    # Stack cached member logits into [ensemble_size, num_examples, classes].
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))
    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
      negative_log_likelihood = tf.reduce_mean(
          ensemble_negative_log_likelihood(labels, logits))
      # Ensemble prediction = mean of member probabilities.
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = tf.reduce_mean(gibbs_cross_entropy(labels, logits))
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)
    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)
  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity,
                                                    FLAGS.alexnet_errors_path)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
if __name__ == '__main__':
app.run(main)
|
def ensemble_negative_log_likelihood(labels, logits):
  """Negative log-likelihood for ensemble.

  For each datapoint (x,y), the ensemble's negative log-likelihood is:

  ```
  -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
                log ensemble_size.
  ```

  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].

  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.cast(labels, tf.int32)
  logits = tf.convert_to_tensor(logits)
  ensemble_size = float(logits.shape[0])
  # Tile the labels along the ensemble axis so every member is scored against
  # the same targets, then compute per-member cross entropies.
  tiled_labels = tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1])
  member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
      tiled_labels, logits)
  # -log mean_m p(y|x,theta_m), computed stably via logsumexp over members.
  return tf.math.log(ensemble_size) - tf.reduce_logsumexp(-member_nll, axis=0)
| 64 | 87 |
# coding=utf-8
# Copyright 2020 The Edward2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble on ImageNet.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `deterministic.py` over different
seeds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import deterministic_model # local file import
import utils # local file import
import numpy as np
import tensorflow.compat.v2 as tf
# Command-line configuration. `data_dir` and `checkpoint_dir` are mandatory;
# everything else has a usable default.
flags.DEFINE_integer('per_core_batch_size', 512, 'Batch size per TPU core/GPU.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('data_dir', None, 'Path to training and testing data.')
flags.mark_flag_as_required('data_dir')
flags.DEFINE_string('checkpoint_dir', None,
                    'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_string('output_dir', '/tmp/imagenet',
                    'The directory where to save predictions.')
flags.DEFINE_string('alexnet_errors_path', None,
                    'Path to AlexNet corruption errors file.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE computation.')

# Accelerator flags.
flags.DEFINE_bool('use_gpu', True, 'Whether to run on GPU or otherwise TPU.')
flags.DEFINE_integer('num_cores', 1, 'Number of TPU cores or number of GPUs.')
flags.DEFINE_string('tpu', None,
                    'Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS

# Number of images in eval dataset.
IMAGENET_VALIDATION_IMAGES = 50000
# ImageNet-1k label count.
NUM_CLASSES = 1000
def ensemble_negative_log_likelihood(labels, logits):
  """Negative log-likelihood for ensemble.

  For each datapoint (x,y), the ensemble's negative log-likelihood is:

  ```
  -log p(y|x) = -log sum_{m=1}^{ensemble_size} exp(log p(y|x,theta_m)) +
                log ensemble_size.
  ```

  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].

  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.cast(labels, tf.int32)
  logits = tf.convert_to_tensor(logits)
  ensemble_size = float(logits.shape[0])
  # Tile the labels along the ensemble axis so every member is scored against
  # the same targets, then compute per-member cross entropies.
  tiled_labels = tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1])
  member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
      tiled_labels, logits)
  # -log mean_m p(y|x,theta_m), computed stably via logsumexp over members.
  return tf.math.log(ensemble_size) - tf.reduce_logsumexp(-member_nll, axis=0)
def gibbs_cross_entropy(labels, logits):
  """Average cross entropy for ensemble members (Gibbs cross entropy).

  For each datapoint (x,y), the ensemble's Gibbs cross entropy is:

  ```
  GCE = - (1/ensemble_size) sum_{m=1}^ensemble_size log p(y|x,theta_m).
  ```

  The Gibbs cross entropy approximates the average cross entropy of a single
  model drawn from the (Gibbs) ensemble.

  Args:
    labels: tf.Tensor of shape [...].
    logits: tf.Tensor of shape [ensemble_size, ..., num_classes].

  Returns:
    tf.Tensor of shape [...].
  """
  labels = tf.cast(labels, tf.int32)
  logits = tf.convert_to_tensor(logits)
  tiled_labels = tf.broadcast_to(labels[tf.newaxis, ...], tf.shape(logits)[:-1])
  member_nll = tf.nn.sparse_softmax_cross_entropy_with_logits(
      tiled_labels, logits)
  # Averaging over the ensemble axis yields the Gibbs (mean-member) loss.
  return tf.reduce_mean(member_nll, axis=0)
def main(argv):
  """Caches per-member test logits to disk, then evaluates the ensemble.

  For every checkpoint found under FLAGS.checkpoint_dir, the model is restored
  and its logits on each (clean and corrupted) test set are written to
  FLAGS.output_dir as .npy files. The cached logits are then averaged in
  probability space to compute ensemble metrics (NLL, Gibbs CE, accuracy, ECE).
  """
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.enable_v2_behavior()
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  # Integer division: a trailing partial batch of the 50k images is dropped.
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  dataset_test = utils.ImageNetInput(
      is_training=False,
      data_dir=FLAGS.data_dir,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=False).input_fn()
  test_datasets = {'clean': dataset_test}
  # One extra dataset per (corruption type, intensity) combination.
  corruption_types, max_intensity = utils.load_corrupted_test_info()
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
          name=name,
          intensity=intensity,
          batch_size=FLAGS.per_core_batch_size,
          drop_remainder=True,
          use_bfloat16=False)

  model = deterministic_model.resnet50(input_shape=(224, 224, 3),
                                       num_classes=NUM_CLASSES)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files. Already-existing files are reused, which
  # makes this phase resumable across restarts.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))
        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(
        name)] = ed.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    # Stack every member's cached logits; NOTE(review): presumably the result
    # is [ensemble_size, num_examples, num_classes] given the writer above —
    # confirm against the .npy layout.
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))
    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
      negative_log_likelihood = tf.reduce_mean(
          ensemble_negative_log_likelihood(labels, logits))
      # Ensemble prediction: average member probabilities, not raw logits.
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = tf.reduce_mean(gibbs_cross_entropy(labels, logits))
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)
    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity,
                                                    FLAGS.alexnet_errors_path)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
# Script entry point: absl parses FLAGS, then invokes main().
if __name__ == '__main__':
  app.run(main)
|
add_identity
|
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
# MASKED: add_identity function (lines 34-66)
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: if no such identity exists.
    """
    row = (session.query(models.Identity)
           .filter_by(identity=identity, identity_type=type)
           .first())
    if row is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: if the account does not exist.
    :raises Duplicate: if the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    # Create the identity on the fly if it has not been registered yet.
    identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if identity_row is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: `default` used to be silently ignored, so get_default_account()
    # (which filters on is_default=True) could never find a match. Persist it.
    iaa = models.IdentityAccountAssociation(identity=identity_row.identity,
                                            identity_type=identity_row.identity_type,
                                            account=account,
                                            is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name, None otherwise.
    :raises IdentityError: if no default account is mapped to the identity.
    """
    association = (session.query(models.IdentityAccountAssociation)
                   .filter_by(identity=identity, identity_type=type, is_default=True)
                   .first())
    if association is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return association.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: if the association does not exist.
    """
    association = session.query(models.IdentityAccountAssociation).filter_by(
        identity=identity, identity_type=type, account=account).first()
    if association is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples.
    """
    # NOTE: **kwargs is accepted for interface compatibility but unused here.
    query = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in query]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all account names mapped to the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(
        identity=identity, identity_type=type)
    return [account for (account,) in rows]
|
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: if type is USERPASS and no password was supplied.
    :raises Duplicate: if the (identity, type) pair already exists.
    :raises DatabaseException: on any other integrity error.
    """
    # A password is mandatory for username/password identities.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # NOTE(review): on PY3 the stored salt is the raw bytes while the
            # hash input uses the base64-encoded salt; PY2 hashes the raw str.
            # Any password-verification code must mirror this asymmetry.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # 1062 is MySQL's duplicate-entry error code; messages from other
        # backends fall through to the generic DatabaseException below.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
| 34 | 66 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: if type is USERPASS and no password was supplied.
    :raises Duplicate: if the (identity, type) pair already exists.
    :raises DatabaseException: on any other integrity error.
    """
    # A password is mandatory for username/password identities.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # NOTE(review): on PY3 the stored salt is the raw bytes while the
            # hash input uses the base64-encoded salt; PY2 hashes the raw str.
            # Any password-verification code must mirror this asymmetry.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # 1062 is MySQL's duplicate-entry error code; messages from other
        # backends fall through to the generic DatabaseException below.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: if no such identity exists.
    """
    row = (session.query(models.Identity)
           .filter_by(identity=identity, identity_type=type)
           .first())
    if row is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: if the account does not exist.
    :raises Duplicate: if the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    # Create the identity on the fly if it has not been registered yet.
    identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if identity_row is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: `default` used to be silently ignored, so get_default_account()
    # (which filters on is_default=True) could never find a match. Persist it.
    iaa = models.IdentityAccountAssociation(identity=identity_row.identity,
                                            identity_type=identity_row.identity_type,
                                            account=account,
                                            is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name, None otherwise.
    :raises IdentityError: if no default account is mapped to the identity.
    """
    association = (session.query(models.IdentityAccountAssociation)
                   .filter_by(identity=identity, identity_type=type, is_default=True)
                   .first())
    if association is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return association.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: if the association does not exist.
    """
    association = session.query(models.IdentityAccountAssociation).filter_by(
        identity=identity, identity_type=type, account=account).first()
    if association is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples.
    """
    # NOTE: **kwargs is accepted for interface compatibility but unused here.
    query = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in query]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all account names mapped to the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(
        identity=identity, identity_type=type)
    return [account for (account,) in rows]
|
add_account_identity
|
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: if type is USERPASS and no password was supplied.
    :raises Duplicate: if the (identity, type) pair already exists.
    :raises DatabaseException: on any other integrity error.
    """
    # A password is mandatory for username/password identities.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # NOTE(review): on PY3 the stored salt is the raw bytes while the
            # hash input uses the base64-encoded salt; PY2 hashes the raw str.
            # Any password-verification code must mirror this asymmetry.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # 1062 is MySQL's duplicate-entry error code; messages from other
        # backends fall through to the generic DatabaseException below.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: if no such identity exists.
    """
    row = (session.query(models.Identity)
           .filter_by(identity=identity, identity_type=type)
           .first())
    if row is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    row.delete(session=session)
# MASKED: add_account_identity function (lines 85-111)
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name, None otherwise.
    :raises IdentityError: if no default account is mapped to the identity.
    """
    association = (session.query(models.IdentityAccountAssociation)
                   .filter_by(identity=identity, identity_type=type, is_default=True)
                   .first())
    if association is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return association.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: if the association does not exist.
    """
    association = session.query(models.IdentityAccountAssociation).filter_by(
        identity=identity, identity_type=type, account=account).first()
    if association is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples.
    """
    # NOTE: **kwargs is accepted for interface compatibility but unused here.
    query = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in query]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all account names mapped to the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(
        identity=identity, identity_type=type)
    return [account for (account,) in rows]
|
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: if the account does not exist.
    :raises Duplicate: if the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    # Create the identity on the fly if it has not been registered yet.
    identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if identity_row is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        identity_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: `default` used to be silently ignored, so get_default_account()
    # (which filters on is_default=True) could never find a match. Persist it.
    iaa = models.IdentityAccountAssociation(identity=identity_row.identity,
                                            identity_type=identity_row.identity_type,
                                            account=account,
                                            is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
| 85 | 111 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: if type is USERPASS and no password was supplied.
    :raises Duplicate: if the (identity, type) pair already exists.
    :raises DatabaseException: on any other integrity error.
    """
    # A password is mandatory for username/password identities.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # NOTE(review): on PY3 the stored salt is the raw bytes while the
            # hash input uses the base64-encoded salt; PY2 hashes the raw str.
            # Any password-verification code must mirror this asymmetry.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # 1062 is MySQL's duplicate-entry error code; messages from other
        # backends fall through to the generic DatabaseException below.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: if no such identity exists.
    """
    row = (session.query(models.Identity)
           .filter_by(identity=identity, identity_type=type)
           .first())
    if row is None:
        raise exception.IdentityError("Identity ('%s','%s') does not exist!" % (identity, type))
    row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: If the account does not exist.
    :raises Duplicate: If the identity/account association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    id_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if id_row is None:
        # Create the identity on the fly, then re-query to pick up the persisted row.
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        id_row = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: the 'default' flag was previously ignored, so get_default_account()
    # (which filters on is_default=True) could never match an association
    # created here. Persist it explicitly.
    iaa = models.IdentityAccountAssociation(identity=id_row.identity, identity_type=id_row.identity_type,
                                            account=account, is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The name of the default account.
    :raises IdentityError: If no default account is mapped to the identity.
    """
    association = session.query(models.IdentityAccountAssociation).filter_by(
        identity=identity, identity_type=type, is_default=True).first()
    if association is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return association.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: If the association does not exist.
    """
    association = session.query(models.IdentityAccountAssociation).filter_by(
        identity=identity, identity_type=type, account=account).first()
    if association is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples, ordered by identity.
    """
    query = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in query]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of account names.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(
        identity=identity, identity_type=type)
    return [account for account, in rows]
|
get_default_account
|
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name; raises IdentityError if no default account is mapped to the identity.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
"""
Deletes a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
"""
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
id.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
"""
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
"""
if not account_exists(account, session=session):
raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
add_identity(identity=identity, type=type, email=email, password=password, session=session)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type, account=account)
try:
iaa.save(session=session)
except IntegrityError:
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
# MASKED: get_default_account function (lines 114-131)
@transactional_session
def del_account_identity(identity, type, account, session=None):
"""
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
"""
aid = session.query(models.IdentityAccountAssociation).filter_by(identity=identity, identity_type=type, account=account).first()
if aid is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
aid.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
"""
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
"""
id_list = []
for id in session.query(models.Identity).order_by(models.Identity.identity):
id_list.append((id.identity, id.identity_type))
return id_list
@read_session
def list_accounts_for_identity(identity, type, session=None):
"""
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
"""
account_list = []
for account, in session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity, identity_type=type):
account_list.append(account)
return account_list
|
@read_session
def get_default_account(identity, type, session=None):
"""
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name, None otherwise.
"""
tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
identity_type=type,
is_default=True).first()
if tmp is None:
raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
return tmp.account
| 114 | 131 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
"""
Deletes a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
"""
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
id.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
"""
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
"""
if not account_exists(account, session=session):
raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
add_identity(identity=identity, type=type, email=email, password=password, session=session)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type, account=account)
try:
iaa.save(session=session)
except IntegrityError:
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
"""
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name, None otherwise.
"""
tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
identity_type=type,
is_default=True).first()
if tmp is None:
raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
"""
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
"""
aid = session.query(models.IdentityAccountAssociation).filter_by(identity=identity, identity_type=type, account=account).first()
if aid is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
aid.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
"""
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
"""
id_list = []
for id in session.query(models.Identity).order_by(models.Identity.identity):
id_list.append((id.identity, id.identity_type))
return id_list
@read_session
def list_accounts_for_identity(identity, type, session=None):
"""
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
"""
account_list = []
for account, in session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity, identity_type=type):
account_list.append(account)
return account_list
|
del_account_identity
|
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
"""
Deletes a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
"""
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
id.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
"""
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
"""
if not account_exists(account, session=session):
raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
add_identity(identity=identity, type=type, email=email, password=password, session=session)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type, account=account)
try:
iaa.save(session=session)
except IntegrityError:
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
"""
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name, None otherwise.
"""
tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
identity_type=type,
is_default=True).first()
if tmp is None:
raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
return tmp.account
# MASKED: del_account_identity function (lines 134-147)
@read_session
def list_identities(session=None, **kwargs):
"""
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
"""
id_list = []
for id in session.query(models.Identity).order_by(models.Identity.identity):
id_list.append((id.identity, id.identity_type))
return id_list
@read_session
def list_accounts_for_identity(identity, type, session=None):
"""
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
"""
account_list = []
for account, in session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity, identity_type=type):
account_list.append(account)
return account_list
|
@transactional_session
def del_account_identity(identity, type, account, session=None):
"""
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
"""
aid = session.query(models.IdentityAccountAssociation).filter_by(identity=identity, identity_type=type, account=account).first()
if aid is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
aid.delete(session=session)
| 134 | 147 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
"""
Deletes a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
"""
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
id.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
"""
Adds a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml).
:param account: The account name.
:param email: The Email address associated with the identity.
:param default: If True, the account should be used by default with the provided identity.
:param password: Password if type is userpass.
:param session: The database session in use.
"""
if not account_exists(account, session=session):
raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
if id is None:
add_identity(identity=identity, type=type, email=email, password=password, session=session)
id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type, account=account)
try:
iaa.save(session=session)
except IntegrityError:
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
"""
Retrieves the default account mapped to an identity.
:param identity: The identity key name. For example, x509DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session to use.
:returns: The default account name, None otherwise.
"""
tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
identity_type=type,
is_default=True).first()
if tmp is None:
raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
"""
Removes a membership association between identity and account.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param account: The account name.
:param session: The database session in use.
"""
aid = session.query(models.IdentityAccountAssociation).filter_by(identity=identity, identity_type=type, account=account).first()
if aid is None:
raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
aid.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
"""
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
"""
id_list = []
for id in session.query(models.Identity).order_by(models.Identity.identity):
id_list.append((id.identity, id.identity_type))
return id_list
@read_session
def list_accounts_for_identity(identity, type, session=None):
"""
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
"""
account_list = []
for account, in session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity, identity_type=type):
account_list.append(account)
return account_list
|
list_identities
|
Returns a list of all identities.
:param session: The database session in use.
returns: A list of all identities.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
"""
Creates a user identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, ssh, saml)
:param email: The Email address associated with the identity.
:param password: If type==userpass, this sets the password.
:param session: The database session in use.
"""
if type == IdentityType.USERPASS and password is None:
raise exception.IdentityError('You must provide a password!')
new_id = models.Identity()
new_id.update({'identity': identity, 'identity_type': type, 'email': email})
if type == IdentityType.USERPASS and password is not None:
salt = os.urandom(255) # make sure the salt has the length of the hash
if six.PY3:
decoded_salt = b64encode(salt).decode()
salted_password = ('%s%s' % (decoded_salt, password)).encode()
else:
salted_password = '%s%s' % (salt, str(password))
password = hashlib.sha256(salted_password).hexdigest() # hash it
new_id.update({'salt': salt, 'password': password, 'email': email})
try:
new_id.save(session=session)
except IntegrityError as e:
if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: If the identity does not exist.
    """
    query = session.query(models.Identity).filter_by(identity=identity, identity_type=type)
    identity_row = query.first()
    if identity_row is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    identity_row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    If the identity does not exist yet, it is created first (including the
    password for userpass identities).

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: If the account does not exist.
    :raises Duplicate: If the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if id is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: honour the `default` flag -- it used to be accepted but ignored,
    # so get_default_account() (which filters on is_default=True) could never
    # find an association created through this function.
    iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type,
                                            account=account, is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name.
    :raises IdentityError: If no association with is_default=True exists for
                           the identity (contrary to older docs, None is never
                           returned).
    """
    tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
                                                                     identity_type=type,
                                                                     is_default=True).first()
    if tmp is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: If no such association exists.
    """
    association = session.query(models.IdentityAccountAssociation) \
                         .filter_by(identity=identity, identity_type=type, account=account) \
                         .first()
    if association is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    association.delete(session=session)
# MASKED: list_identities function (lines 150-165)
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all accounts for the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity,
                                                                              identity_type=type)
    return [account for (account,) in rows]
|
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples, ordered by identity.
    """
    rows = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in rows]
| 150 | 165 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    For USERPASS identities the plain-text password is never stored: a random
    salt is generated and the SHA-256 hex digest of salt+password is persisted
    instead.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: If type is USERPASS and no password was given.
    :raises Duplicate: If the (identity, type) pair already exists.
    :raises DatabaseException: On any other database integrity failure.
    """
    # A USERPASS identity is unusable without a password, so fail early.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # On PY3 the raw salt bytes are base64-encoded before being
            # concatenated with the password, then re-encoded for hashing.
            # NOTE(review): the hashed input therefore differs between PY2 and
            # PY3 for the same salt/password -- any verification code must
            # mirror this exact scheme.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            # On PY2 the raw salt bytes are concatenated directly.
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        # The raw salt bytes (not their base64 form) are what gets persisted.
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # MySQL reports duplicate-key violations as error 1062; map those to
        # Duplicate and everything else to a generic DatabaseException.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: If the identity does not exist.
    """
    query = session.query(models.Identity).filter_by(identity=identity, identity_type=type)
    identity_row = query.first()
    if identity_row is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    identity_row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    If the identity does not exist yet, it is created first (including the
    password for userpass identities).

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: If the account does not exist.
    :raises Duplicate: If the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if id is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: honour the `default` flag -- it used to be accepted but ignored,
    # so get_default_account() (which filters on is_default=True) could never
    # find an association created through this function.
    iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type,
                                            account=account, is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name.
    :raises IdentityError: If no association with is_default=True exists for
                           the identity (contrary to older docs, None is never
                           returned).
    """
    tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
                                                                     identity_type=type,
                                                                     is_default=True).first()
    if tmp is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: If no such association exists.
    """
    association = session.query(models.IdentityAccountAssociation) \
                         .filter_by(identity=identity, identity_type=type, account=account) \
                         .first()
    if association is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples, ordered by identity.
    """
    rows = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in rows]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all accounts for the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity,
                                                                              identity_type=type)
    return [account for (account,) in rows]
|
list_accounts_for_identity
|
Returns a list of all accounts for an identity.
:param identity: The identity key name. For example x509 DN, or a username.
:param type: The type of the authentication (x509, gss, userpass, saml).
:param session: The database session in use.
returns: A list of all accounts for the identity.
|
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    For USERPASS identities the plain-text password is never stored: a random
    salt is generated and the SHA-256 hex digest of salt+password is persisted
    instead.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: If type is USERPASS and no password was given.
    :raises Duplicate: If the (identity, type) pair already exists.
    :raises DatabaseException: On any other database integrity failure.
    """
    # A USERPASS identity is unusable without a password, so fail early.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # On PY3 the raw salt bytes are base64-encoded before being
            # concatenated with the password, then re-encoded for hashing.
            # NOTE(review): the hashed input therefore differs between PY2 and
            # PY3 for the same salt/password -- any verification code must
            # mirror this exact scheme.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            # On PY2 the raw salt bytes are concatenated directly.
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        # The raw salt bytes (not their base64 form) are what gets persisted.
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # MySQL reports duplicate-key violations as error 1062; map those to
        # Duplicate and everything else to a generic DatabaseException.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: If the identity does not exist.
    """
    query = session.query(models.Identity).filter_by(identity=identity, identity_type=type)
    identity_row = query.first()
    if identity_row is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    identity_row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    If the identity does not exist yet, it is created first (including the
    password for userpass identities).

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: If the account does not exist.
    :raises Duplicate: If the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if id is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: honour the `default` flag -- it used to be accepted but ignored,
    # so get_default_account() (which filters on is_default=True) could never
    # find an association created through this function.
    iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type,
                                            account=account, is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name.
    :raises IdentityError: If no association with is_default=True exists for
                           the identity (contrary to older docs, None is never
                           returned).
    """
    tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
                                                                     identity_type=type,
                                                                     is_default=True).first()
    if tmp is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: If no such association exists.
    """
    association = session.query(models.IdentityAccountAssociation) \
                         .filter_by(identity=identity, identity_type=type, account=account) \
                         .first()
    if association is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples, ordered by identity.
    """
    rows = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in rows]
# MASKED: list_accounts_for_identity function (lines 168-185)
|
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all accounts for the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity,
                                                                              identity_type=type)
    return [account for (account,) in rows]
| 168 | 185 |
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Mario Lassnig, <[email protected]>, 2012-2015, 2017
# - Vincent Garonne, <[email protected]>, 2012-2013
# - Thomas Beermann, <[email protected]>, 2014
# - Hannes Hansen, <[email protected]>, 2019
# - Ruturaj Gujar <[email protected]>, 2019
#
# PY3K COMPATIBLE
import hashlib
import os
import six
from base64 import b64encode
from re import match
from sqlalchemy.exc import IntegrityError
from rucio.common import exception
from rucio.core.account import account_exists
from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session
@transactional_session
def add_identity(identity, type, email, password=None, session=None):
    """
    Creates a user identity.

    For USERPASS identities the plain-text password is never stored: a random
    salt is generated and the SHA-256 hex digest of salt+password is persisted
    instead.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml)
    :param email: The Email address associated with the identity.
    :param password: If type==userpass, this sets the password.
    :param session: The database session in use.
    :raises IdentityError: If type is USERPASS and no password was given.
    :raises Duplicate: If the (identity, type) pair already exists.
    :raises DatabaseException: On any other database integrity failure.
    """
    # A USERPASS identity is unusable without a password, so fail early.
    if type == IdentityType.USERPASS and password is None:
        raise exception.IdentityError('You must provide a password!')
    new_id = models.Identity()
    new_id.update({'identity': identity, 'identity_type': type, 'email': email})
    if type == IdentityType.USERPASS and password is not None:
        salt = os.urandom(255)  # make sure the salt has the length of the hash
        if six.PY3:
            # On PY3 the raw salt bytes are base64-encoded before being
            # concatenated with the password, then re-encoded for hashing.
            # NOTE(review): the hashed input therefore differs between PY2 and
            # PY3 for the same salt/password -- any verification code must
            # mirror this exact scheme.
            decoded_salt = b64encode(salt).decode()
            salted_password = ('%s%s' % (decoded_salt, password)).encode()
        else:
            # On PY2 the raw salt bytes are concatenated directly.
            salted_password = '%s%s' % (salt, str(password))
        password = hashlib.sha256(salted_password).hexdigest()  # hash it
        # The raw salt bytes (not their base64 form) are what gets persisted.
        new_id.update({'salt': salt, 'password': password, 'email': email})
    try:
        new_id.save(session=session)
    except IntegrityError as e:
        # MySQL reports duplicate-key violations as error 1062; map those to
        # Duplicate and everything else to a generic DatabaseException.
        if match('.*IntegrityError.*1062.*Duplicate entry.*for key.*', e.args[0]):
            raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
        raise exception.DatabaseException(str(e))
@transactional_session
def del_identity(identity, type, session=None):
    """
    Deletes a user identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :raises IdentityError: If the identity does not exist.
    """
    query = session.query(models.Identity).filter_by(identity=identity, identity_type=type)
    identity_row = query.first()
    if identity_row is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    identity_row.delete(session=session)
@transactional_session
def add_account_identity(identity, type, account, email, default=False, password=None, session=None):
    """
    Adds a membership association between identity and account.

    If the identity does not exist yet, it is created first (including the
    password for userpass identities).

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, ssh, saml).
    :param account: The account name.
    :param email: The Email address associated with the identity.
    :param default: If True, the account should be used by default with the provided identity.
    :param password: Password if type is userpass.
    :param session: The database session in use.
    :raises AccountNotFound: If the account does not exist.
    :raises Duplicate: If the association already exists.
    """
    if not account_exists(account, session=session):
        raise exception.AccountNotFound('Account \'%s\' does not exist.' % account)
    id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    if id is None:
        add_identity(identity=identity, type=type, email=email, password=password, session=session)
        id = session.query(models.Identity).filter_by(identity=identity, identity_type=type).first()
    # BUGFIX: honour the `default` flag -- it used to be accepted but ignored,
    # so get_default_account() (which filters on is_default=True) could never
    # find an association created through this function.
    iaa = models.IdentityAccountAssociation(identity=id.identity, identity_type=id.identity_type,
                                            account=account, is_default=default)
    try:
        iaa.save(session=session)
    except IntegrityError:
        raise exception.Duplicate('Identity pair \'%s\',\'%s\' already exists!' % (identity, type))
@read_session
def get_default_account(identity, type, session=None):
    """
    Retrieves the default account mapped to an identity.

    :param identity: The identity key name. For example, x509DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session to use.
    :returns: The default account name.
    :raises IdentityError: If no association with is_default=True exists for
                           the identity (contrary to older docs, None is never
                           returned).
    """
    tmp = session.query(models.IdentityAccountAssociation).filter_by(identity=identity,
                                                                     identity_type=type,
                                                                     is_default=True).first()
    if tmp is None:
        raise exception.IdentityError('There is no default account for identity (%s, %s)' % (identity, type))
    return tmp.account
@transactional_session
def del_account_identity(identity, type, account, session=None):
    """
    Removes a membership association between identity and account.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param account: The account name.
    :param session: The database session in use.
    :raises IdentityError: If no such association exists.
    """
    association = session.query(models.IdentityAccountAssociation) \
                         .filter_by(identity=identity, identity_type=type, account=account) \
                         .first()
    if association is None:
        raise exception.IdentityError('Identity (\'%s\',\'%s\') does not exist!' % (identity, type))
    association.delete(session=session)
@read_session
def list_identities(session=None, **kwargs):
    """
    Returns a list of all identities.

    :param session: The database session in use.
    :returns: A list of (identity, identity_type) tuples, ordered by identity.
    """
    rows = session.query(models.Identity).order_by(models.Identity.identity)
    return [(row.identity, row.identity_type) for row in rows]
@read_session
def list_accounts_for_identity(identity, type, session=None):
    """
    Returns a list of all accounts for an identity.

    :param identity: The identity key name. For example x509 DN, or a username.
    :param type: The type of the authentication (x509, gss, userpass, saml).
    :param session: The database session in use.
    :returns: A list of all accounts for the identity.
    """
    rows = session.query(models.IdentityAccountAssociation.account).filter_by(identity=identity,
                                                                              identity_type=type)
    return [account for (account,) in rows]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.